diff --git a/.github/actions/tune-runner-vm/action.yml b/.github/actions/tune-runner-vm/action.yml index 30cf183e72a45..e8914dbe74f6c 100644 --- a/.github/actions/tune-runner-vm/action.yml +++ b/.github/actions/tune-runner-vm/action.yml @@ -33,8 +33,10 @@ runs: # Set vm.swappiness=1 to avoid swapping and allow high RAM usage echo 1 | sudo tee /proc/sys/vm/swappiness # Set swappiness to 1 for all cgroups and sub-groups - for swappiness_file in /sys/fs/cgroup/memory/*/memory.swappiness /sys/fs/cgroup/memory/*/*/memory.swappiness; do - echo 1 | sudo tee $swappiness_file > /dev/null + for swappiness_dir in /sys/fs/cgroup/memory/*/ /sys/fs/cgroup/memory/*/*/; do + if [ -d "$swappiness_dir" ]; then + echo 1 | sudo tee "${swappiness_dir}memory.swappiness" > /dev/null + fi done # use "madvise" Linux Transparent HugePages (THP) setting diff --git a/.github/workflows/ci-build-macos.yaml b/.github/workflows/ci-build-macos.yaml index 0236ab3c6fab5..4f4adc4d20604 100644 --- a/.github/workflows/ci-build-macos.yaml +++ b/.github/workflows/ci-build-macos.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 diff --git a/.github/workflows/ci-cpp-build-centos7.yaml b/.github/workflows/ci-cpp-build-centos7.yaml index 55151ba1cdd5c..ca5f5632cf29a 100644 --- a/.github/workflows/ci-cpp-build-centos7.yaml +++ b/.github/workflows/ci-cpp-build-centos7.yaml @@ -22,15 +22,11 @@ on: pull_request: branches: - master - paths: - - '.github/workflows/**' - - 'pulsar-client-cpp/**' - push: - branches: - branch-* paths: - 
'.github/workflows/**' - 'pulsar-client-cpp/**' + jobs: cpp-build-centos7: diff --git a/.github/workflows/ci-cpp-build-windows.yaml b/.github/workflows/ci-cpp-build-windows.yaml index a287a4c3560e1..963a717111ba2 100644 --- a/.github/workflows/ci-cpp-build-windows.yaml +++ b/.github/workflows/ci-cpp-build-windows.yaml @@ -22,16 +22,15 @@ on: pull_request: branches: - master - paths: - - '.github/workflows/**' - - 'pulsar-client-cpp/**' - push: - branches: - branch-* paths: - '.github/workflows/**' - 'pulsar-client-cpp/**' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: VCPKG_FEATURE_FLAGS: manifests @@ -46,18 +45,18 @@ jobs: matrix: include: - name: 'Windows x64' - os: windows-latest + os: windows-2022 triplet: x64-windows vcpkg_dir: 'C:\vcpkg' suffix: 'windows-win64' - generator: 'Visual Studio 16 2019' + generator: 'Visual Studio 17 2022' arch: '-A x64' - name: 'Windows x86' - os: windows-latest + os: windows-2022 triplet: x86-windows vcpkg_dir: 'C:\vcpkg' suffix: 'windows-win32' - generator: 'Visual Studio 16 2019' + generator: 'Visual Studio 17 2022' arch: '-A Win32' steps: @@ -65,7 +64,7 @@ jobs: uses: actions/checkout@v2 - name: Detect changed files - id: changes + id: changes uses: apache/pulsar-test-infra/paths-filter@master with: filters: .github/changes-filter.yaml diff --git a/.github/workflows/ci-cpp.yaml b/.github/workflows/ci-cpp.yaml index 70f5f6f917bd4..1f30edc42751b 100644 --- a/.github/workflows/ci-cpp.yaml +++ b/.github/workflows/ci-cpp.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - 
distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-go-functions-style.yaml b/.github/workflows/ci-go-functions-style.yaml index 4c77cf2bf47b8..c4702a0a3fb7c 100644 --- a/.github/workflows/ci-go-functions-style.yaml +++ b/.github/workflows/ci-go-functions-style.yaml @@ -22,16 +22,15 @@ on: pull_request: branches: - master - paths: - - '.github/workflows/**' - - 'pulsar-function-go/**' - push: - branches: - branch-* paths: - '.github/workflows/**' - 'pulsar-function-go/**' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 diff --git a/.github/workflows/ci-go-functions-test.yaml b/.github/workflows/ci-go-functions-test.yaml index 37e8b81729efa..1a95ad3750072 100644 --- a/.github/workflows/ci-go-functions-test.yaml +++ b/.github/workflows/ci-go-functions-test.yaml @@ -22,16 +22,15 @@ on: pull_request: branches: - master - paths: - - '.github/workflows/**' - - 'pulsar-function-go/**' - push: - branches: - branch-* paths: - '.github/workflows/**' - 'pulsar-function-go/**' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 diff --git a/.github/workflows/ci-integration-backwards-compatibility.yaml b/.github/workflows/ci-integration-backwards-compatibility.yaml index 3b13b63d38da4..f5827d6a3f872 100644 --- a/.github/workflows/ci-integration-backwards-compatibility.yaml +++ b/.github/workflows/ci-integration-backwards-compatibility.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + 
env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -69,7 +71,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-integration-cli.yaml b/.github/workflows/ci-integration-cli.yaml index cf51dcc761103..df0698afbe177 100644 --- a/.github/workflows/ci-integration-cli.yaml +++ b/.github/workflows/ci-integration-cli.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -69,7 +71,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-integration-function.yaml b/.github/workflows/ci-integration-function.yaml index 52b6d8f972a5d..fec9941ab39e6 100644 --- a/.github/workflows/ci-integration-function.yaml +++ b/.github/workflows/ci-integration-function.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -69,7 +71,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-integration-messaging.yaml 
b/.github/workflows/ci-integration-messaging.yaml index 0a81e4a487997..dd099ca5af3e0 100644 --- a/.github/workflows/ci-integration-messaging.yaml +++ b/.github/workflows/ci-integration-messaging.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -69,7 +71,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-integration-process.yaml b/.github/workflows/ci-integration-process.yaml index a5523c15f2a2e..1e652d64c5da5 100644 --- a/.github/workflows/ci-integration-process.yaml +++ b/.github/workflows/ci-integration-process.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-integration-pulsar-io-ora.yaml b/.github/workflows/ci-integration-pulsar-io-ora.yaml index 8bb1cba5db3d0..bad5aa40d3d57 100644 --- a/.github/workflows/ci-integration-pulsar-io-ora.yaml +++ b/.github/workflows/ci-integration-pulsar-io-ora.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: 
MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -69,7 +71,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-integration-pulsar-io.yaml b/.github/workflows/ci-integration-pulsar-io.yaml index 538e94e284247..8ba7d7f5450be 100644 --- a/.github/workflows/ci-integration-pulsar-io.yaml +++ b/.github/workflows/ci-integration-pulsar-io.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -69,7 +71,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-integration-schema.yaml b/.github/workflows/ci-integration-schema.yaml index edeec555b8bc7..fdc38383c59d5 100644 --- a/.github/workflows/ci-integration-schema.yaml +++ b/.github/workflows/ci-integration-schema.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git 
a/.github/workflows/ci-integration-sql.yaml b/.github/workflows/ci-integration-sql.yaml index 53c996874583a..6b5122124f929 100644 --- a/.github/workflows/ci-integration-sql.yaml +++ b/.github/workflows/ci-integration-sql.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Xmx768m -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 MALLOC_ARENA_MAX: "1" @@ -69,7 +71,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-integration-standalone.yaml b/.github/workflows/ci-integration-standalone.yaml index dc3577677f742..46ffe3a82fea5 100644 --- a/.github/workflows/ci-integration-standalone.yaml +++ b/.github/workflows/ci-integration-standalone.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-integration-thread.yaml b/.github/workflows/ci-integration-thread.yaml index 0420baec17970..7e758e7556ba3 100644 --- a/.github/workflows/ci-integration-thread.yaml +++ b/.github/workflows/ci-integration-thread.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ 
github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-integration-tiered-filesystem.yaml b/.github/workflows/ci-integration-tiered-filesystem.yaml index 89ed30de58aff..b89f0d81957a6 100644 --- a/.github/workflows/ci-integration-tiered-filesystem.yaml +++ b/.github/workflows/ci-integration-tiered-filesystem.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-integration-tiered-jcloud.yaml b/.github/workflows/ci-integration-tiered-jcloud.yaml index 3cf661a5a162f..19ef68bdcb643 100644 --- a/.github/workflows/ci-integration-tiered-jcloud.yaml +++ b/.github/workflows/ci-integration-tiered-jcloud.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + 
distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-integration-transaction.yaml b/.github/workflows/ci-integration-transaction.yaml index 547066db495f1..e020aec7483d1 100644 --- a/.github/workflows/ci-integration-transaction.yaml +++ b/.github/workflows/ci-integration-transaction.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-license.yaml b/.github/workflows/ci-license.yaml index f706a1cd73bfb..8e1225c328385 100644 --- a/.github/workflows/ci-license.yaml +++ b/.github/workflows/ci-license.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -69,7 +71,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 @@ -86,4 +88,4 @@ jobs: - name: license check if: ${{ steps.check_changes.outputs.docs_only != 'true' }} - run: src/check-binary-license ./distribution/server/target/apache-pulsar-*-bin.tar.gz + run: src/check-binary-license --no-presto ./distribution/server/target/apache-pulsar-*-bin.tar.gz diff --git a/.github/workflows/ci-maven-cache-update.yaml b/.github/workflows/ci-maven-cache-update.yaml 
index b04d2860c93a8..755f2afdf1b26 100644 --- a/.github/workflows/ci-maven-cache-update.yaml +++ b/.github/workflows/ci-maven-cache-update.yaml @@ -102,7 +102,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ (github.event_name == 'schedule' || steps.changes.outputs.poms == 'true') && steps.cache.outputs.cache-hit != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: Download dependencies diff --git a/.github/workflows/ci-owasp-dependency-check.yaml b/.github/workflows/ci-owasp-dependency-check.yaml index 3e951275addcb..301dd25b1bcc9 100644 --- a/.github/workflows/ci-owasp-dependency-check.yaml +++ b/.github/workflows/ci-owasp-dependency-check.yaml @@ -53,7 +53,7 @@ jobs: - name: Set up JDK 11 uses: actions/setup-java@v2 with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: run install by skip tests diff --git a/.github/workflows/ci-pulsar-website-build.yaml b/.github/workflows/ci-pulsar-website-build.yaml index 58d67ad3e0cbc..1c8658c0f4e5e 100644 --- a/.github/workflows/ci-pulsar-website-build.yaml +++ b/.github/workflows/ci-pulsar-website-build.yaml @@ -53,7 +53,7 @@ jobs: - name: Set up JDK 11 uses: actions/setup-java@v2 with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-python-build-3.9-client.yaml b/.github/workflows/ci-python-build-3.9-client.yaml index f8b04a77f6b2c..aed5d3568fbd1 100644 --- a/.github/workflows/ci-python-build-3.9-client.yaml +++ b/.github/workflows/ci-python-build-3.9-client.yaml @@ -22,15 +22,11 @@ on: pull_request: branches: - master - paths: - - '.github/workflows/**' - - 'pulsar-client-cpp/**' - push: - branches: - branch-* paths: - '.github/workflows/**' - 'pulsar-client-cpp/**' + jobs: build-wheel: diff --git a/.github/workflows/ci-shade-test.yaml b/.github/workflows/ci-shade-test.yaml index 8789bf7dfed44..cb66e1638c03f 100644 --- a/.github/workflows/ci-shade-test.yaml +++ 
b/.github/workflows/ci-shade-test.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -69,7 +71,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: clean disk diff --git a/.github/workflows/ci-unit-broker-broker-gp1.yaml b/.github/workflows/ci-unit-broker-broker-gp1.yaml index 6defee0cdc2e3..c752c2900997f 100644 --- a/.github/workflows/ci-unit-broker-broker-gp1.yaml +++ b/.github/workflows/ci-unit-broker-broker-gp1.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: build modules diff --git a/.github/workflows/ci-unit-broker-broker-gp2.yaml b/.github/workflows/ci-unit-broker-broker-gp2.yaml index 821157c369387..b4b8eabebbc43 100644 --- a/.github/workflows/ci-unit-broker-broker-gp2.yaml +++ b/.github/workflows/ci-unit-broker-broker-gp2.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ 
-68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: build modules diff --git a/.github/workflows/ci-unit-broker-client-api.yaml b/.github/workflows/ci-unit-broker-client-api.yaml index 5bcefdd0529d9..319a43b97b985 100644 --- a/.github/workflows/ci-unit-broker-client-api.yaml +++ b/.github/workflows/ci-unit-broker-client-api.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: build modules diff --git a/.github/workflows/ci-unit-broker-client-impl.yaml b/.github/workflows/ci-unit-broker-client-impl.yaml index 21003a14288f4..a5ec3b90be1ed 100644 --- a/.github/workflows/ci-unit-broker-client-impl.yaml +++ b/.github/workflows/ci-unit-broker-client-impl.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: build modules diff --git a/.github/workflows/ci-unit-broker-jdk8.yaml b/.github/workflows/ci-unit-broker-jdk8.yaml index ce56ca2b92d70..aa31b609ef7d3 100644 --- 
a/.github/workflows/ci-unit-broker-jdk8.yaml +++ b/.github/workflows/ci-unit-broker-jdk8.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 8 - name: build modules diff --git a/.github/workflows/ci-unit-broker-other.yaml b/.github/workflows/ci-unit-broker-other.yaml index 50a87b5124ab6..c7f99a7352241 100644 --- a/.github/workflows/ci-unit-broker-other.yaml +++ b/.github/workflows/ci-unit-broker-other.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: build modules diff --git a/.github/workflows/ci-unit-proxy.yaml b/.github/workflows/ci-unit-proxy.yaml index bc2cfb9d14679..4e32b89d246da 100644 --- a/.github/workflows/ci-unit-proxy.yaml +++ b/.github/workflows/ci-unit-proxy.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ 
jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: build modules pulsar-proxy diff --git a/.github/workflows/ci-unit.yaml b/.github/workflows/ci-unit.yaml index 37bdfafb0c641..01162bdcae3f0 100644 --- a/.github/workflows/ci-unit.yaml +++ b/.github/workflows/ci-unit.yaml @@ -22,10 +22,12 @@ on: pull_request: branches: - master - push: - branches: - branch-* +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 @@ -68,7 +70,7 @@ jobs: uses: actions/setup-java@v2 if: ${{ steps.check_changes.outputs.docs_only != 'true' }} with: - distribution: 'adopt' + distribution: 'temurin' java-version: 11 - name: run unit test 'OTHER' diff --git a/NOTICE b/NOTICE index 2052417487269..bbbe4fab89b56 100644 --- a/NOTICE +++ b/NOTICE @@ -1,5 +1,5 @@ Apache Pulsar -Copyright 2017-2021 The Apache Software Foundation +Copyright 2017-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
diff --git a/bin/pulsar b/bin/pulsar index 69fcd6e48e1de..0640a94333bd4 100755 --- a/bin/pulsar +++ b/bin/pulsar @@ -140,6 +140,7 @@ where command is one of: sql-worker Run a sql worker server sql Run sql CLI standalone Run a broker server with local bookies and local zookeeper + autorecovery Run an autorecovery service initialize-cluster-metadata One-time metadata initialization delete-cluster-metadata Delete a cluster's metadata @@ -284,8 +285,8 @@ fi # log directory & file PULSAR_LOG_DIR=${PULSAR_LOG_DIR:-"$PULSAR_HOME/logs"} PULSAR_LOG_APPENDER=${PULSAR_LOG_APPENDER:-"RoutingAppender"} -PULSAR_LOG_ROOT_LEVEL=${PULSAR_LOG_ROOT_LEVEL:-"info"} PULSAR_LOG_LEVEL=${PULSAR_LOG_LEVEL:-"info"} +PULSAR_LOG_ROOT_LEVEL=${PULSAR_LOG_ROOT_LEVEL:-"${PULSAR_LOG_LEVEL}"} PULSAR_ROUTING_APPENDER_DEFAULT=${PULSAR_ROUTING_APPENDER_DEFAULT:-"Console"} #Configure log configuration system properties @@ -343,6 +344,9 @@ elif [ $COMMAND == "functions-worker" ]; then elif [ $COMMAND == "standalone" ]; then PULSAR_LOG_FILE=${PULSAR_LOG_FILE:-"pulsar-standalone.log"} exec $JAVA $LOG4J2_SHUTDOWN_HOOK_DISABLED $OPTS ${ZK_OPTS} -Dpulsar.log.file=$PULSAR_LOG_FILE org.apache.pulsar.PulsarStandaloneStarter --config $PULSAR_STANDALONE_CONF $@ +elif [ ${COMMAND} == "autorecovery" ]; then + PULSAR_LOG_FILE=${PULSAR_LOG_FILE:-"pulsar-autorecovery.log"} + exec $JAVA $OPTS -Dpulsar.log.file=$PULSAR_LOG_FILE org.apache.bookkeeper.replication.AutoRecoveryMain --conf $PULSAR_BOOKKEEPER_CONF $@ elif [ $COMMAND == "initialize-cluster-metadata" ]; then exec $JAVA $OPTS org.apache.pulsar.PulsarClusterMetadataSetup $@ elif [ $COMMAND == "delete-cluster-metadata" ]; then diff --git a/bin/pulsar-admin-common.sh b/bin/pulsar-admin-common.sh index 46ab6d8c75818..a0945bfc2e939 100755 --- a/bin/pulsar-admin-common.sh +++ b/bin/pulsar-admin-common.sh @@ -99,10 +99,12 @@ OPTS="$OPTS $PULSAR_EXTRA_OPTS" PULSAR_LOG_DIR=${PULSAR_LOG_DIR:-"$PULSAR_HOME/logs"} PULSAR_LOG_APPENDER=${PULSAR_LOG_APPENDER:-"RoutingAppender"} 
PULSAR_LOG_LEVEL=${PULSAR_LOG_LEVEL:-"info"} +PULSAR_LOG_ROOT_LEVEL=${PULSAR_LOG_ROOT_LEVEL:-"${PULSAR_LOG_LEVEL}"} PULSAR_ROUTING_APPENDER_DEFAULT=${PULSAR_ROUTING_APPENDER_DEFAULT:-"Console"} #Configure log configuration system properties OPTS="$OPTS -Dpulsar.log.appender=$PULSAR_LOG_APPENDER" OPTS="$OPTS -Dpulsar.log.dir=$PULSAR_LOG_DIR" OPTS="$OPTS -Dpulsar.log.level=$PULSAR_LOG_LEVEL" +OPTS="$OPTS -Dpulsar.log.root.level=$PULSAR_LOG_ROOT_LEVEL" OPTS="$OPTS -Dpulsar.routing.appender.default=$PULSAR_ROUTING_APPENDER_DEFAULT" diff --git a/bin/pulsar-client b/bin/pulsar-client index dcee2e3b646a9..73f82df37b06b 100755 --- a/bin/pulsar-client +++ b/bin/pulsar-client @@ -102,11 +102,13 @@ OPTS="$OPTS $PULSAR_EXTRA_OPTS" PULSAR_LOG_DIR=${PULSAR_LOG_DIR:-"$PULSAR_HOME/logs"} PULSAR_LOG_APPENDER=${PULSAR_LOG_APPENDER:-"Console"} PULSAR_LOG_LEVEL=${PULSAR_LOG_LEVEL:-"info"} +PULSAR_LOG_ROOT_LEVEL=${PULSAR_LOG_ROOT_LEVEL:-"${PULSAR_LOG_LEVEL}"} #Configure log configuration system properties OPTS="$OPTS -Dpulsar.log.dir=$PULSAR_LOG_DIR" OPTS="$OPTS -Dpulsar.log.appender=$PULSAR_LOG_APPENDER" OPTS="$OPTS -Dpulsar.log.level=$PULSAR_LOG_LEVEL" +OPTS="$OPTS -Dpulsar.log.root.level=$PULSAR_LOG_ROOT_LEVEL" #Change to PULSAR_HOME to support relative paths cd "$PULSAR_HOME" diff --git a/bin/pulsar-daemon b/bin/pulsar-daemon index f018bc4b66154..3bd82acb946a7 100755 --- a/bin/pulsar-daemon +++ b/bin/pulsar-daemon @@ -30,6 +30,7 @@ where command is one of: functions-worker Run a functions worker server standalone Run a standalone Pulsar service proxy Run a Proxy Pulsar service + autorecovery Run an autorecovery service where argument is one of: -force (accepted only with stop command): Decides whether to stop the server forcefully if not stopped by normal shutdown @@ -102,6 +103,9 @@ case $command in (proxy) echo "doing $startStop $command ..." ;; + (autorecovery) + echo "doing $startStop $command ..." 
+ ;; (*) echo "Error: unknown service name $command" usage @@ -139,7 +143,7 @@ mkdir -p "$PULSAR_LOG_DIR" case $startStop in (start) if [ -f $pid ]; then - if kill -0 `cat $pid` > /dev/null 2>&1; then + if ps -p `cat $pid` > /dev/null 2>&1; then echo $command running as process `cat $pid`. Stop it first. exit 1 fi @@ -161,7 +165,7 @@ case $startStop in (stop) if [ -f $pid ]; then TARGET_PID=$(cat $pid) - if kill -0 $TARGET_PID > /dev/null 2>&1; then + if ps -p $TARGET_PID > /dev/null 2>&1; then echo "stopping $command" kill $TARGET_PID @@ -182,7 +186,7 @@ case $startStop in echo "Shutdown completed." fi - if kill -0 $TARGET_PID > /dev/null 2>&1; then + if ps -p $TARGET_PID > /dev/null 2>&1; then fileName=$location/$command.out $JAVA_HOME/bin/jstack $TARGET_PID > $fileName echo "Thread dumps are taken for analysis at $fileName" diff --git a/bin/pulsar-managed-ledger-admin b/bin/pulsar-managed-ledger-admin index 9ed5d69bb132d..dce85ce3246d5 100755 --- a/bin/pulsar-managed-ledger-admin +++ b/bin/pulsar-managed-ledger-admin @@ -244,7 +244,7 @@ def deleteMLLedgerIdsCommand(zk, mlPath, deleteLedgerIds): deletLedgerIds = set(deleteLedgerIds.split(",")) deletLedgerIdSet = set() for id in deletLedgerIds: - deletLedgerIdSet.add(long(id)) + deletLedgerIdSet.add(int(id)) deleteLedgerIdsFromManagedLedgerInfo(zk, mlPath, deletLedgerIdSet) else: print('Usage: --command {} [--ledgerIds]'.format(deleteMlLedgerIds)) @@ -274,7 +274,7 @@ def updateMarkDeleteOfCursorCommand(zk, mlPath, cursorName, markDeletePosition): if markDeletePosition: positionPair = markDeletePosition.split(":") if len(positionPair) == 2: - updateCursorMarkDelete(zk, mlPath + "/" + cursorName, (long(positionPair[0])), long(positionPair[1])) + updateCursorMarkDelete(zk, mlPath + "/" + cursorName, (int(positionPair[0])), int(positionPair[1])) else: print("markDeletePosition must be in format :") else: diff --git a/bin/pulsar-perf b/bin/pulsar-perf index aa6e67a9942de..cef0cf3c0ee7b 100755 --- a/bin/pulsar-perf +++ 
b/bin/pulsar-perf @@ -87,6 +87,7 @@ Usage: pulsar-perf where command is one of: produce Run a producer consume Run a consumer + transaction Run a transaction repeatedly read Run a topic reader websocket-producer Run a websocket producer @@ -137,9 +138,13 @@ OPTS="$OPTS $PULSAR_EXTRA_OPTS" PULSAR_LOG_APPENDER=${PULSAR_LOG_APPENDER:-"Console"} PULSAR_LOG_DIR=${PULSAR_LOG_DIR:-"$PULSAR_HOME/logs"} PULSAR_LOG_FILE=${PULSAR_LOG_FILE:-"pulsar-perftest.log"} +PULSAR_LOG_LEVEL=${PULSAR_LOG_LEVEL:-"info"} +PULSAR_LOG_ROOT_LEVEL=${PULSAR_LOG_ROOT_LEVEL:-"${PULSAR_LOG_LEVEL}"} #Configure log configuration system properties OPTS="$OPTS -Dpulsar.log.appender=$PULSAR_LOG_APPENDER" +OPTS="$OPTS -Dpulsar.log.level=$PULSAR_LOG_LEVEL" +OPTS="$OPTS -Dpulsar.log.root.level=$PULSAR_LOG_ROOT_LEVEL" OPTS="$OPTS -Dpulsar.log.dir=$PULSAR_LOG_DIR" OPTS="$OPTS -Dpulsar.log.file=$PULSAR_LOG_FILE" @@ -160,6 +165,8 @@ if [ "$COMMAND" == "produce" ]; then exec $JAVA $OPTS org.apache.pulsar.testclient.PerformanceProducer --conf-file $PULSAR_PERFTEST_CONF "$@" elif [ "$COMMAND" == "consume" ]; then exec $JAVA $OPTS org.apache.pulsar.testclient.PerformanceConsumer --conf-file $PULSAR_PERFTEST_CONF "$@" +elif [ "$COMMAND" == "transaction" ]; then + exec $JAVA $OPTS org.apache.pulsar.testclient.PerformanceTransaction --conf-file $PULSAR_PERFTEST_CONF "$@" elif [ "$COMMAND" == "read" ]; then exec $JAVA $OPTS org.apache.pulsar.testclient.PerformanceReader --conf-file $PULSAR_PERFTEST_CONF "$@" elif [ "$COMMAND" == "monitor-brokers" ]; then diff --git a/bouncy-castle/bc/pom.xml b/bouncy-castle/bc/pom.xml index 696b43fafcf6c..620061ab67a6a 100644 --- a/bouncy-castle/bc/pom.xml +++ b/bouncy-castle/bc/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar bouncy-castle-parent - 2.9.0-SNAPSHOT + 2.9.3 .. 
diff --git a/bouncy-castle/bcfips-include-test/pom.xml b/bouncy-castle/bcfips-include-test/pom.xml index 07b31ebe0638d..8fa49a3ea2023 100644 --- a/bouncy-castle/bcfips-include-test/pom.xml +++ b/bouncy-castle/bcfips-include-test/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar bouncy-castle-parent - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem index 7f9effa6e92d3..e9be840d3a083 100644 --- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem +++ b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem @@ -1,13 +1,13 @@ Certificate: Data: - Version: 1 (0x0) + Version: 3 (0x2) Serial Number: - 0c:26:15:df:8f:71:1d:6a:31:d0:da:af:64:ef:80:de:ac:9a:46:76 + 61:e6:1b:07:90:6a:4f:f7:cd:46:b9:59:1d:3e:1c:39:0d:f2:5e:05 Signature Algorithm: sha256WithRSAEncryption Issuer: CN = CARoot Validity - Not Before: Apr 23 17:08:51 2021 GMT - Not After : Apr 21 17:08:51 2031 GMT + Not Before: May 30 13:38:24 2022 GMT + Not After : May 27 13:38:24 2032 GMT Subject: C = US, ST = CA, O = Apache, OU = Apache Pulsar, CN = localhost Subject Public Key Info: Public Key Algorithm: rsaEncryption @@ -32,37 +32,41 @@ Certificate: a0:1a:81:9d:d2:e1:66:dd:c4:cc:fc:63:04:ac:ec: a7:35 Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 Signature Algorithm: sha256WithRSAEncryption - 3a:38:c8:85:48:ed:84:c9:f4:bc:ef:b4:4b:a1:46:9c:97:9b: - 5f:7e:1a:ff:9b:dc:93:0e:7e:ab:de:09:21:30:1f:7f:2a:f7: - 94:d1:b3:07:3d:b1:71:4f:72:90:1f:41:3d:fe:34:14:ac:5a: - 39:02:f1:a4:8a:d1:d3:c0:48:da:6f:37:dc:b5:1d:60:29:e6: - c5:b0:ce:b4:52:8d:f6:6b:59:0b:e4:c8:f1:1a:40:3a:4f:bd: - e2:dd:32:2f:21:3c:33:d7:61:5f:86:cd:94:31:31:f1:ff:c6: - 08:9e:67:bc:8f:9d:bf:38:a8:8c:ff:3f:1f:fb:24:ab:bb:7c: - 
fb:1b:c3:1b:62:b4:dd:21:d3:7b:19:92:16:b7:7d:f6:95:ee: - 14:a0:83:de:c5:05:d8:af:44:1d:f7:eb:32:e2:03:ac:c9:12: - df:11:b6:af:f8:b9:24:ae:55:3e:25:ae:2a:b2:d3:b6:6a:e9: - f9:28:e6:e0:46:98:66:2c:0d:a3:fe:c7:82:48:13:80:f2:b2: - d1:5c:7d:bb:11:1c:60:62:1b:f7:1a:11:e1:ee:29:70:f1:95: - c1:67:c4:f1:e2:d5:f4:24:49:0d:6e:2f:65:7b:48:cd:40:f9: - c9:26:a3:c7:41:20:d1:6e:2c:38:8e:1b:bc:93:fa:22:39:3d: - 2a:f6:ba:77 + 88:1d:a7:42:a1:1c:87:45:4a:e6:5e:aa:9c:7b:71:2e:5c:9e: + 11:85:0f:a3:c5:b4:ea:73:9e:b7:61:9d:4a:e9:cd:1a:c5:2e: + 03:be:a3:2b:b6:12:6a:15:03:04:3f:fb:4a:09:0d:84:0e:dd: + c0:63:2b:0f:13:fb:1f:98:64:49:48:e7:96:d5:41:c4:ca:94: + bf:ab:c5:ea:80:2c:ee:1f:ab:12:54:74:f1:f1:56:ea:03:c0: + 1c:0d:8d:b9:6e:b0:d0:5f:21:c1:d3:e3:45:df:cf:64:69:13: + 6c:54:79:06:7d:53:46:77:3c:21:cc:c4:6a:5f:f9:9a:07:0f: + a5:95:20:f0:0e:93:07:48:96:a9:2c:28:50:21:d7:f8:13:4f: + b8:ca:aa:1f:a6:41:7c:71:1f:ad:11:3f:3d:1e:e9:81:3c:86: + c1:af:2d:39:a0:13:9f:99:ec:9a:47:44:df:28:02:a7:1d:6a: + 8d:c0:1e:24:e8:19:fc:1d:dc:67:29:04:be:0a:d6:c5:81:59: + 27:2c:f5:e5:df:ba:0b:c6:50:e5:b3:bd:73:12:3e:2c:ef:a6: + 8a:ed:eb:86:9a:45:45:52:a3:44:78:12:60:17:e2:3a:32:92: + 03:6e:89:89:16:c5:e0:bc:be:a7:cb:93:4b:d8:56:33:a0:a0: + 53:b2:0d:a5 -----BEGIN CERTIFICATE----- -MIIC7zCCAdcCFAwmFd+PcR1qMdDar2TvgN6smkZ2MA0GCSqGSIb3DQEBCwUAMBEx -DzANBgNVBAMMBkNBUm9vdDAeFw0yMTA0MjMxNzA4NTFaFw0zMTA0MjExNzA4NTFa -MFcxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEPMA0GA1UEChMGQXBhY2hlMRYw -FAYDVQQLEw1BcGFjaGUgUHVsc2FyMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvv7ctmK2d9tqjE9RiD5i+HKKJIrpv -1f0fZ+ORA5iAgQ7t2PZwfyw2aD1T6lg6ptWJZku9HldxE21LEeVApXaEJJJAWICW -yR8sxFXro3lzcFw3montL7pr44J8aUoCVIuBXjy/TIrL6ixeg+e3EAhfglijidHa -kroqKO4wKD9brhBxlsfhEsWwGq1Eb0Q6EUqaPA+NBoB7NO8/bPRexURUHsjdx4CF -gNlo5sZTA3fh/hhhB3cFTO1ZvF1BOGrvXaGyYJjUSCiVAooO/c97G9IRzBAMUHPX -zDhsg915JqqQyJuEhrxZ6WJp9JgbxIB4fqAagZ3S4WbdxMz8YwSs7Kc1AgMBAAEw -DQYJKoZIhvcNAQELBQADggEBADo4yIVI7YTJ9LzvtEuhRpyXm19+Gv+b3JMOfqve 
-CSEwH38q95TRswc9sXFPcpAfQT3+NBSsWjkC8aSK0dPASNpvN9y1HWAp5sWwzrRS -jfZrWQvkyPEaQDpPveLdMi8hPDPXYV+GzZQxMfH/xgieZ7yPnb84qIz/Px/7JKu7 -fPsbwxtitN0h03sZkha3ffaV7hSgg97FBdivRB336zLiA6zJEt8Rtq/4uSSuVT4l -riqy07Zq6fko5uBGmGYsDaP+x4JIE4DystFcfbsRHGBiG/caEeHuKXDxlcFnxPHi -1fQkSQ1uL2V7SM1A+ckmo8dBINFuLDiOG7yT+iI5PSr2unc= +MIIDFDCCAfygAwIBAgIUYeYbB5BqT/fNRrlZHT4cOQ3yXgUwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz +MzgyNFowVzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQ8wDQYDVQQKEwZBcGFj +aGUxFjAUBgNVBAsTDUFwYWNoZSBQdWxzYXIxEjAQBgNVBAMTCWxvY2FsaG9zdDCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK+/ty2YrZ322qMT1GIPmL4c +ookium/V/R9n45EDmICBDu3Y9nB/LDZoPVPqWDqm1YlmS70eV3ETbUsR5UCldoQk +kkBYgJbJHyzEVeujeXNwXDeaie0vumvjgnxpSgJUi4FePL9MisvqLF6D57cQCF+C +WKOJ0dqSuioo7jAoP1uuEHGWx+ESxbAarURvRDoRSpo8D40GgHs07z9s9F7FRFQe +yN3HgIWA2WjmxlMDd+H+GGEHdwVM7Vm8XUE4au9dobJgmNRIKJUCig79z3sb0hHM +EAxQc9fMOGyD3XkmqpDIm4SGvFnpYmn0mBvEgHh+oBqBndLhZt3EzPxjBKzspzUC +AwEAAaMeMBwwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB +CwUAA4IBAQCIHadCoRyHRUrmXqqce3EuXJ4RhQ+jxbTqc563YZ1K6c0axS4DvqMr +thJqFQMEP/tKCQ2EDt3AYysPE/sfmGRJSOeW1UHEypS/q8XqgCzuH6sSVHTx8Vbq +A8AcDY25brDQXyHB0+NF389kaRNsVHkGfVNGdzwhzMRqX/maBw+llSDwDpMHSJap +LChQIdf4E0+4yqofpkF8cR+tET89HumBPIbBry05oBOfmeyaR0TfKAKnHWqNwB4k +6Bn8HdxnKQS+CtbFgVknLPXl37oLxlDls71zEj4s76aK7euGmkVFUqNEeBJgF+I6 +MpIDbomJFsXgvL6ny5NL2FYzoKBTsg2l -----END CERTIFICATE----- diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem index 90fbb9b8898fb..21bbaba213f69 100644 --- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem +++ b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem @@ -2,76 +2,76 @@ Certificate: Data: Version: 3 (0x2) Serial Number: - 10:50:a0:5c:8e:cf:88:33:b6:b5:d2:1e:38:bf:78:56:2a:f1:09:22 + 
70:4c:6b:e0:aa:cc:01:77:f2:1f:04:8c:d4:72:03:a5:32:5f:c7:be Signature Algorithm: sha256WithRSAEncryption Issuer: CN = CARoot Validity - Not Before: Apr 23 17:08:51 2021 GMT - Not After : Apr 21 17:08:51 2031 GMT + Not Before: May 30 13:38:24 2022 GMT + Not After : May 27 13:38:24 2032 GMT Subject: CN = CARoot Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (2048 bit) Modulus: - 00:c4:92:ca:40:ce:8d:71:dd:e9:2b:e3:3b:b7:17: - 1d:25:bf:12:66:c0:cb:32:18:32:3e:24:ea:e1:26: - 1a:97:e8:85:4b:19:8e:c0:0a:da:a6:57:ec:31:a6: - a8:68:d9:8e:5c:a2:00:54:30:11:47:a6:0e:84:0d: - 6d:e3:48:a8:a6:e3:42:63:97:ef:91:c0:3a:bc:db: - 77:77:3b:d0:45:fc:c5:a8:3a:74:dc:82:4e:83:ed: - f9:9d:a0:30:11:0c:d9:20:7b:a6:04:60:a1:9c:41: - 33:c6:04:d2:a7:e8:b1:46:e6:35:5e:fd:ca:2e:42: - 2f:f4:0c:f7:6e:8d:60:f5:cf:82:7a:e3:eb:ed:d0: - a1:51:a9:78:8d:14:2d:ca:ea:cc:fa:ae:a9:f9:6c: - df:5c:cb:83:4a:42:22:5c:48:3e:a6:63:70:43:63: - ff:3f:d8:1f:88:e1:91:7b:49:b9:67:10:8a:60:51: - 24:68:db:68:24:5f:10:a5:a2:b3:95:83:7e:3c:88: - 9c:1c:52:6a:2c:03:52:aa:90:90:85:21:78:a7:20: - b0:e2:dc:79:b4:b7:57:f0:be:df:3b:fc:21:23:ee: - ff:63:5d:0b:0d:3d:ab:61:54:8c:2d:96:44:7b:42: - 10:60:3b:1d:a8:ab:33:01:e7:96:74:08:a6:f9:9d: - ba:cf + 00:dc:9c:01:30:5f:c5:42:48:10:78:30:5d:66:20: + 0e:74:61:f6:82:74:9f:6f:b2:ed:00:9e:6c:21:b6: + 83:21:6b:54:34:e8:a9:dc:81:83:7a:0e:9f:cc:3d: + eb:97:ee:cf:ca:0e:5f:96:81:dc:e7:75:88:91:2f: + d5:65:74:c2:d8:67:58:d8:41:6a:5f:a9:79:dc:29: + 36:4a:b8:39:20:d2:f8:a8:59:9f:e3:be:f9:61:80: + 1b:ce:63:bb:12:56:06:b9:77:4e:6a:40:65:9b:bf: + 5b:f8:27:88:f5:ff:40:ee:47:bc:2d:8e:c3:a6:62: + 0d:18:76:d1:f5:af:1a:6b:25:4e:d4:55:15:f0:e3: + 97:1b:68:eb:75:b8:80:ea:64:ef:7e:e2:f0:5c:da: + 6d:d6:16:7b:0f:5e:ae:72:47:5a:df:0b:8a:e0:74: + c1:b7:82:0d:97:41:d7:84:16:51:40:37:15:a1:eb: + 70:0c:f1:5a:26:39:11:1e:97:b9:36:32:ce:16:b9: + 42:ad:31:5b:1e:89:f5:3e:07:0e:d6:fc:9a:46:8e: + 87:89:90:5c:f3:00:e4:9b:ce:7b:93:fe:9a:d8:65: + ec:49:5c:e8:eb:41:3d:53:bc:ce:e8:6d:44:ec:76: 
+ 3f:e6:9b:13:e4:f8:d0:1c:00:e6:4f:73:e1:b0:27: + 6f:99 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Subject Key Identifier: - C6:91:71:A0:C9:1F:A9:5A:87:7B:E5:10:FB:9A:2A:12:90:44:7D:A0 + 8B:30:D2:81:7C:BE:AB:4D:76:37:19:2B:69:5E:DB:F7:81:95:73:F5 X509v3 Authority Key Identifier: - keyid:C6:91:71:A0:C9:1F:A9:5A:87:7B:E5:10:FB:9A:2A:12:90:44:7D:A0 + keyid:8B:30:D2:81:7C:BE:AB:4D:76:37:19:2B:69:5E:DB:F7:81:95:73:F5 X509v3 Basic Constraints: critical CA:TRUE Signature Algorithm: sha256WithRSAEncryption - 5d:c2:68:9e:66:fb:67:39:fc:5e:2f:ba:4c:f0:20:3f:f9:4a: - e2:b9:05:56:d6:5e:da:01:c7:8b:1a:70:e6:67:61:84:71:67: - a8:11:bc:7c:4d:58:d0:52:44:71:19:47:87:60:cb:16:12:25: - b2:b0:95:13:ff:52:00:36:78:2d:d3:ce:4e:c6:7d:1b:e5:8e: - 37:23:8a:ef:c2:44:88:e2:bc:47:c4:ef:23:f5:8b:6d:fc:39: - 3c:cb:7e:70:7c:60:51:33:5a:38:3a:fd:cc:8f:2c:08:d5:07: - 06:f9:89:77:96:8e:60:21:e5:05:98:37:d6:c4:b7:a3:43:9e: - 87:13:9d:12:c4:8f:6a:ad:a9:67:c4:3a:7e:14:77:c3:75:72: - 95:e6:25:a2:14:e7:77:4d:8f:dd:45:ae:f0:f6:f3:fe:2b:cf: - ea:0e:f8:61:66:45:db:9f:6b:e4:5e:b8:d4:04:41:68:e9:7c: - a4:7e:c8:1c:4d:ec:49:49:57:a4:46:95:e8:0f:55:ea:08:2e: - b9:7a:62:e2:be:05:00:d5:81:5f:60:60:58:4e:19:bc:24:ee: - 0e:17:63:da:fd:40:44:c2:5f:7d:e9:26:b4:80:4d:db:88:4f: - 31:a4:16:93:fd:a8:70:94:50:f1:23:92:20:fb:26:c3:9a:71: - b1:9c:c9:db + 02:4c:80:4f:a4:b5:f4:70:be:82:cf:3a:ed:40:f9:97:17:22: + 07:5d:e0:9b:4e:54:f8:4b:64:99:f5:07:7f:87:5b:9c:60:ec: + 9f:69:e6:00:97:5a:cd:14:59:31:45:be:b7:bd:c4:ce:57:82: + 1a:4a:62:ce:8e:c8:59:d5:62:43:8b:94:c0:ab:c2:cc:3a:a0: + 69:d3:65:15:82:35:de:85:64:e6:7b:d9:3a:22:12:77:f7:71: + 82:86:d7:6c:e5:69:d5:3a:f2:a7:25:f7:dc:f3:6f:cb:eb:85: + 48:44:63:e2:6d:3c:82:eb:3a:c0:e1:bd:9d:3a:12:11:66:1f: + 05:8f:49:65:31:d6:cf:26:06:46:ba:73:c7:ad:61:fc:14:5f: + 68:d1:ee:02:5f:4b:98:b6:5b:0c:98:4e:61:7b:cb:35:ee:44: + a1:ce:e1:00:a2:56:f0:0d:72:3b:58:66:e8:9a:dc:62:d5:95: + 3e:5a:48:21:a8:7c:f8:1f:5a:13:db:53:33:11:3e:e6:14:39: + 
cd:2b:3f:77:5b:ee:f7:0c:59:69:2f:46:9a:34:56:89:05:8e: + 40:94:94:3f:95:f6:fa:f9:1a:e8:1a:80:7b:1d:f7:0c:a1:be: + e2:38:98:fd:0f:e7:68:4d:7d:fe:ae:5f:e3:32:c6:5d:37:77: + 7a:28:ce:cc -----BEGIN CERTIFICATE----- -MIIDAzCCAeugAwIBAgIUEFCgXI7PiDO2tdIeOL94VirxCSIwDQYJKoZIhvcNAQEL -BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIxMDQyMzE3MDg1MVoXDTMxMDQyMTE3 -MDg1MVowETEPMA0GA1UEAwwGQ0FSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAxJLKQM6Ncd3pK+M7txcdJb8SZsDLMhgyPiTq4SYal+iFSxmOwAra -plfsMaaoaNmOXKIAVDARR6YOhA1t40iopuNCY5fvkcA6vNt3dzvQRfzFqDp03IJO -g+35naAwEQzZIHumBGChnEEzxgTSp+ixRuY1Xv3KLkIv9Az3bo1g9c+CeuPr7dCh -Ual4jRQtyurM+q6p+WzfXMuDSkIiXEg+pmNwQ2P/P9gfiOGRe0m5ZxCKYFEkaNto -JF8QpaKzlYN+PIicHFJqLANSqpCQhSF4pyCw4tx5tLdX8L7fO/whI+7/Y10LDT2r -YVSMLZZEe0IQYDsdqKszAeeWdAim+Z26zwIDAQABo1MwUTAdBgNVHQ4EFgQUxpFx -oMkfqVqHe+UQ+5oqEpBEfaAwHwYDVR0jBBgwFoAUxpFxoMkfqVqHe+UQ+5oqEpBE -faAwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXcJonmb7Zzn8 -Xi+6TPAgP/lK4rkFVtZe2gHHixpw5mdhhHFnqBG8fE1Y0FJEcRlHh2DLFhIlsrCV -E/9SADZ4LdPOTsZ9G+WONyOK78JEiOK8R8TvI/WLbfw5PMt+cHxgUTNaODr9zI8s -CNUHBvmJd5aOYCHlBZg31sS3o0OehxOdEsSPaq2pZ8Q6fhR3w3VyleYlohTnd02P -3UWu8Pbz/ivP6g74YWZF259r5F641ARBaOl8pH7IHE3sSUlXpEaV6A9V6gguuXpi -4r4FANWBX2BgWE4ZvCTuDhdj2v1ARMJffekmtIBN24hPMaQWk/2ocJRQ8SOSIPsm -w5pxsZzJ2w== +MIIDAzCCAeugAwIBAgIUcExr4KrMAXfyHwSM1HIDpTJfx74wDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz +MzgyNFowETEPMA0GA1UEAwwGQ0FSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEA3JwBMF/FQkgQeDBdZiAOdGH2gnSfb7LtAJ5sIbaDIWtUNOip3IGD +eg6fzD3rl+7Pyg5floHc53WIkS/VZXTC2GdY2EFqX6l53Ck2Srg5INL4qFmf4775 +YYAbzmO7ElYGuXdOakBlm79b+CeI9f9A7ke8LY7DpmINGHbR9a8aayVO1FUV8OOX +G2jrdbiA6mTvfuLwXNpt1hZ7D16uckda3wuK4HTBt4INl0HXhBZRQDcVoetwDPFa +JjkRHpe5NjLOFrlCrTFbHon1PgcO1vyaRo6HiZBc8wDkm857k/6a2GXsSVzo60E9 +U7zO6G1E7HY/5psT5PjQHADmT3PhsCdvmQIDAQABo1MwUTAdBgNVHQ4EFgQUizDS +gXy+q012NxkraV7b94GVc/UwHwYDVR0jBBgwFoAUizDSgXy+q012NxkraV7b94GV 
+c/UwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAAkyAT6S19HC+ +gs867UD5lxciB13gm05U+EtkmfUHf4dbnGDsn2nmAJdazRRZMUW+t73EzleCGkpi +zo7IWdViQ4uUwKvCzDqgadNlFYI13oVk5nvZOiISd/dxgobXbOVp1TrypyX33PNv +y+uFSERj4m08gus6wOG9nToSEWYfBY9JZTHWzyYGRrpzx61h/BRfaNHuAl9LmLZb +DJhOYXvLNe5Eoc7hAKJW8A1yO1hm6JrcYtWVPlpIIah8+B9aE9tTMxE+5hQ5zSs/ +d1vu9wxZaS9GmjRWiQWOQJSUP5X2+vka6BqAex33DKG+4jiY/Q/naE19/q5f4zLG +XTd3eijOzA== -----END CERTIFICATE----- diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem index e79bac70987d9..e5d9e6e74b233 100644 --- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem +++ b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem @@ -1,13 +1,13 @@ Certificate: Data: - Version: 1 (0x0) + Version: 3 (0x2) Serial Number: - 0c:26:15:df:8f:71:1d:6a:31:d0:da:af:64:ef:80:de:ac:9a:46:77 + 61:e6:1b:07:90:6a:4f:f7:cd:46:b9:59:1d:3e:1c:39:0d:f2:5e:06 Signature Algorithm: sha256WithRSAEncryption Issuer: CN = CARoot Validity - Not Before: Apr 23 17:08:51 2021 GMT - Not After : Apr 21 17:08:51 2031 GMT + Not Before: May 30 13:38:24 2022 GMT + Not After : May 27 13:38:24 2032 GMT Subject: C = US, ST = CA, O = Apache, OU = Apache Pulsar, CN = superUser Subject Public Key Info: Public Key Algorithm: rsaEncryption @@ -32,37 +32,41 @@ Certificate: b6:98:ef:dd:03:82:58:a3:32:dc:90:a1:b6:a6:1e: e1:0b Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 Signature Algorithm: sha256WithRSAEncryption - 6f:c2:2f:41:a4:a0:45:10:33:61:20:27:d2:74:40:f9:80:3b: - 06:88:91:c3:b8:4d:1a:c4:fd:39:9e:3a:c8:41:de:31:4e:ef: - 8b:06:ce:17:e2:8e:b5:ee:43:92:0a:44:3d:55:e9:85:81:49: - c9:19:44:15:f1:bd:ec:1e:cb:34:44:b1:01:c0:96:49:30:a4: - 5a:64:44:6e:59:d9:b1:17:bf:01:13:b7:45:53:8c:8d:a7:79: - 
fc:19:b4:a9:b5:9b:6f:16:8e:b3:de:5e:2a:db:01:f2:3e:b0: - 8f:23:4f:8f:49:ee:d5:b7:98:54:6e:b5:be:8b:fc:05:87:e3: - 8b:2e:70:28:2c:75:75:c3:76:a4:0d:5e:71:67:30:ec:69:cc: - 2b:43:69:3b:e8:78:89:51:98:07:cb:21:e9:7a:76:a9:b3:e8: - e6:19:e7:32:ae:3a:b8:24:c4:20:d8:c2:dc:91:99:d1:9b:8f: - 77:3c:e7:a8:53:ee:91:fe:ed:2b:86:18:0a:55:44:46:78:a1: - 78:41:a5:e9:fe:8b:db:bb:10:2e:72:52:b7:54:81:84:8b:f7: - 29:f3:86:29:7f:f8:e2:d8:51:d8:b2:3c:c2:78:7c:a4:11:9c: - 0a:42:64:1b:13:cc:91:1a:08:d9:ed:f1:23:5f:fd:b3:89:bb: - 7a:cc:96:8d + 90:62:ba:7b:6f:45:95:7a:71:2f:e7:88:0c:64:b8:6c:05:86: + 7f:47:08:ce:d6:e2:5a:32:13:0c:82:ad:a7:af:f0:a2:f7:86: + 79:87:1a:89:78:95:b1:9f:be:c5:8b:39:fd:12:94:b6:e1:69: + ff:fa:1e:c3:82:d8:6c:03:80:45:ac:1c:06:70:bb:77:c3:41: + 5f:b6:9d:fe:36:6f:ae:23:6c:bf:43:79:8e:74:85:8e:96:89: + a9:c4:6d:d9:fa:05:ba:a8:11:7c:82:45:94:3d:9f:b6:7c:2f: + 4e:6d:37:c3:fb:79:7e:0c:d2:15:fa:0e:ea:2d:c9:24:f3:34: + 13:6f:db:d7:55:e1:0c:2f:7e:fe:4c:3b:fa:7e:03:26:0f:6a: + 95:d2:22:ce:27:71:6a:97:ac:36:0a:20:ec:19:a0:78:23:0c: + 54:f3:b1:dd:33:36:7c:b7:61:23:70:8f:7f:c8:5f:e8:9e:b5: + 02:31:4d:b3:40:b0:7b:b2:ee:14:a7:69:22:8b:38:85:5d:04: + 6e:d5:44:41:31:a7:4b:71:86:fb:81:cd:3d:db:96:23:0b:bc: + e1:67:46:0e:87:86:91:4e:1a:35:37:af:a4:ac:9a:de:e3:4f: + 82:47:f1:c4:16:58:11:8f:76:d2:4d:df:a1:c6:a2:8f:33:6d: + 72:15:28:76 -----BEGIN CERTIFICATE----- -MIIC7zCCAdcCFAwmFd+PcR1qMdDar2TvgN6smkZ3MA0GCSqGSIb3DQEBCwUAMBEx -DzANBgNVBAMMBkNBUm9vdDAeFw0yMTA0MjMxNzA4NTFaFw0zMTA0MjExNzA4NTFa -MFcxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEPMA0GA1UEChMGQXBhY2hlMRYw -FAYDVQQLEw1BcGFjaGUgUHVsc2FyMRIwEAYDVQQDEwlzdXBlclVzZXIwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNQ32YQPmwW7yu28ALrSaQluBiOO1o -sXBGO95E+RRRhhDrypDniOj5kYXg3bW0FLl444bVVG1o7BSStPgiWwU97TElZQgF -hMrmDCESWDLHGmCjT9JKnigZfEWEAIyJ3N6K5U+Ikcyk8YFFTH3C/+LBicYSc5Xi -Nr3brotaaGqQUd4riF+qZ/So42PcvhmCzJ1/5o37gr4iAT1WEztbBLToxRjmLg36 -ukqN6MZaoVGaSmLXr920/OLVza6ZbFxhVgvXDBp3XPU6alS1njOsqXUomnav0HpX 
-ABuREzH9QoghRwUQAS9Zu8c62eFYTBtscbaY790DglijMtyQobamHuELAgMBAAEw -DQYJKoZIhvcNAQELBQADggEBAG/CL0GkoEUQM2EgJ9J0QPmAOwaIkcO4TRrE/Tme -OshB3jFO74sGzhfijrXuQ5IKRD1V6YWBSckZRBXxveweyzREsQHAlkkwpFpkRG5Z -2bEXvwETt0VTjI2nefwZtKm1m28WjrPeXirbAfI+sI8jT49J7tW3mFRutb6L/AWH -44sucCgsdXXDdqQNXnFnMOxpzCtDaTvoeIlRmAfLIel6dqmz6OYZ5zKuOrgkxCDY -wtyRmdGbj3c856hT7pH+7SuGGApVREZ4oXhBpen+i9u7EC5yUrdUgYSL9ynzhil/ -+OLYUdiyPMJ4fKQRnApCZBsTzJEaCNnt8SNf/bOJu3rMlo0= +MIIDFDCCAfygAwIBAgIUYeYbB5BqT/fNRrlZHT4cOQ3yXgYwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz +MzgyNFowVzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQ8wDQYDVQQKEwZBcGFj +aGUxFjAUBgNVBAsTDUFwYWNoZSBQdWxzYXIxEjAQBgNVBAMTCXN1cGVyVXNlcjCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM1DfZhA+bBbvK7bwAutJpCW +4GI47WixcEY73kT5FFGGEOvKkOeI6PmRheDdtbQUuXjjhtVUbWjsFJK0+CJbBT3t +MSVlCAWEyuYMIRJYMscaYKNP0kqeKBl8RYQAjInc3orlT4iRzKTxgUVMfcL/4sGJ +xhJzleI2vduui1poapBR3iuIX6pn9KjjY9y+GYLMnX/mjfuCviIBPVYTO1sEtOjF +GOYuDfq6So3oxlqhUZpKYtev3bT84tXNrplsXGFWC9cMGndc9TpqVLWeM6ypdSia +dq/QelcAG5ETMf1CiCFHBRABL1m7xzrZ4VhMG2xxtpjv3QOCWKMy3JChtqYe4QsC +AwEAAaMeMBwwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB +CwUAA4IBAQCQYrp7b0WVenEv54gMZLhsBYZ/RwjO1uJaMhMMgq2nr/Ci94Z5hxqJ +eJWxn77Fizn9EpS24Wn/+h7DgthsA4BFrBwGcLt3w0Fftp3+Nm+uI2y/Q3mOdIWO +lompxG3Z+gW6qBF8gkWUPZ+2fC9ObTfD+3l+DNIV+g7qLckk8zQTb9vXVeEML37+ +TDv6fgMmD2qV0iLOJ3Fql6w2CiDsGaB4IwxU87HdMzZ8t2EjcI9/yF/onrUCMU2z +QLB7su4Up2kiiziFXQRu1URBMadLcYb7gc0925YjC7zhZ0YOh4aRTho1N6+krJre +40+CR/HEFlgRj3bSTd+hxqKPM21yFSh2 -----END CERTIFICATE----- diff --git a/bouncy-castle/bcfips/pom.xml b/bouncy-castle/bcfips/pom.xml index 49f6737d4d325..4350308bf7d29 100644 --- a/bouncy-castle/bcfips/pom.xml +++ b/bouncy-castle/bcfips/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar bouncy-castle-parent - 2.9.0-SNAPSHOT + 2.9.3 .. 
diff --git a/bouncy-castle/pom.xml b/bouncy-castle/pom.xml index f908c0e3fde36..474d20035e6e6 100644 --- a/bouncy-castle/pom.xml +++ b/bouncy-castle/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/build/regenerate_certs_for_tests.sh b/build/regenerate_certs_for_tests.sh index 7e4cf8474e234..fb0274cc19316 100755 --- a/build/regenerate_certs_for_tests.sh +++ b/build/regenerate_certs_for_tests.sh @@ -34,7 +34,7 @@ function reissue_certificate() { keyfile=$1 certfile=$2 openssl x509 -x509toreq -in $certfile -signkey $keyfile -out ${certfile}.csr - openssl x509 -req -CA ca-cert.pem -CAkey ca-key -in ${certfile}.csr -text -outform pem -out $certfile -days 3650 -CAcreateserial + openssl x509 -req -CA ca-cert.pem -CAkey ca-key -in ${certfile}.csr -text -outform pem -out $certfile -days 3650 -CAcreateserial -extfile <(printf "subjectAltName = DNS:localhost, IP:127.0.0.1") } generate_ca @@ -44,6 +44,16 @@ reissue_certificate $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls reissue_certificate $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/server-key.pem \ $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/server-cert.pem +# use same CA key and cert for ProxyWithAuthorizationTest/client-cacert.pem +cp ca-cert.pem $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-cacert.pem +reissue_certificate $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-key.pem \ + $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-cert.pem + +# use same CA key and cert for ProxyWithAuthorizationTest/proxy-cacert.pem +cp ca-cert.pem $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cacert.pem +reissue_certificate $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-key.pem \ + 
$ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cert.pem + generate_ca cp ca-cert.pem $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem reissue_certificate $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem \ @@ -56,18 +66,5 @@ cp ca-cert.pem $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/Prox reissue_certificate $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-key.pem \ $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-cert.pem -generate_ca -cp ca-cert.pem $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-cacert.pem -reissue_certificate $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-key.pem \ - $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-cert.pem - -generate_ca -cp ca-cert.pem $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cacert.pem -reissue_certificate $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-key.pem \ - $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cert.pem - - - - cd $ROOT_DIR rm -rf /tmp/keygendir$$ diff --git a/build/run_unit_group.sh b/build/run_unit_group.sh index 77082cf3568a4..8bb84679fd944 100755 --- a/build/run_unit_group.sh +++ b/build/run_unit_group.sh @@ -42,7 +42,7 @@ function broker_group_1() { } function broker_group_2() { - $MVN_TEST_COMMAND -pl pulsar-broker -Dgroups='schema,utils,functions-worker,broker-io,broker-discovery,broker-compaction,broker-naming,websocket,other' + $MVN_TEST_COMMAND -pl pulsar-broker -Dgroups='schema,utils,functions-worker,broker-io,broker-discovery,broker-compaction,broker-naming,websocket,other' 
-DtestReuseFork=false } function broker_client_api() { @@ -117,7 +117,7 @@ function other() { **/ManagedLedgerTest.java, **/TestPulsarKeyValueSchemaHandler.java, **/PrimitiveSchemaTest.java, - BlobStoreManagedLedgerOffloaderTest.java' + BlobStoreManagedLedgerOffloaderTest.java' -DtestReuseFork=false $MVN_TEST_COMMAND -pl managed-ledger -Dinclude='**/ManagedLedgerTest.java, **/OffloadersCacheTest.java' diff --git a/buildtools/pom.xml b/buildtools/pom.xml index 7b5b02a510660..7bb238b716778 100644 --- a/buildtools/pom.xml +++ b/buildtools/pom.xml @@ -31,7 +31,7 @@ org.apache.pulsar buildtools - 2.9.0-SNAPSHOT + 2.9.3 jar Pulsar Build Tools @@ -39,8 +39,8 @@ 1.8 1.8 3.0.0-M3 - 2.14.0 - 1.7.25 + 2.18.0 + 1.7.32 7.3.0 3.11 3.2.4 @@ -81,10 +81,6 @@ org.apache.logging.log4j log4j-slf4j-impl - - org.apache.logging.log4j - log4j-1.2-api - org.slf4j jcl-over-slf4j @@ -105,7 +101,7 @@ io.netty netty-common - 4.1.68.Final + 4.1.77.Final test diff --git a/buildtools/src/main/java/org/apache/pulsar/tests/FailFastNotifier.java b/buildtools/src/main/java/org/apache/pulsar/tests/FailFastNotifier.java index 3ee4ad70a7674..0f001332a98d8 100644 --- a/buildtools/src/main/java/org/apache/pulsar/tests/FailFastNotifier.java +++ b/buildtools/src/main/java/org/apache/pulsar/tests/FailFastNotifier.java @@ -41,7 +41,7 @@ */ public class FailFastNotifier implements IInvokedMethodListener, ITestListener { - private static final boolean FAIL_FAST_ENABLED = Boolean.valueOf( + private static final boolean FAIL_FAST_ENABLED = Boolean.parseBoolean( System.getProperty("testFailFast", "true")); static class FailFastEventsSingleton { diff --git a/buildtools/src/main/java/org/apache/pulsar/tests/FastThreadLocalCleanupListener.java b/buildtools/src/main/java/org/apache/pulsar/tests/FastThreadLocalCleanupListener.java index cd75bfbd4318e..c40956a9e259e 100644 --- a/buildtools/src/main/java/org/apache/pulsar/tests/FastThreadLocalCleanupListener.java +++ 
b/buildtools/src/main/java/org/apache/pulsar/tests/FastThreadLocalCleanupListener.java @@ -27,7 +27,7 @@ public class FastThreadLocalCleanupListener extends BetweenTestClassesListenerAdapter { private static final Logger LOG = LoggerFactory.getLogger(FastThreadLocalCleanupListener.class); private static final boolean FAST_THREAD_LOCAL_CLEANUP_ENABLED = - Boolean.valueOf(System.getProperty("testFastThreadLocalCleanup", "true")); + Boolean.parseBoolean(System.getProperty("testFastThreadLocalCleanup", "true")); private static final String FAST_THREAD_LOCAL_CLEANUP_PACKAGE = System.getProperty("testFastThreadLocalCleanupPackage", "org.apache.pulsar"); private static final FastThreadLocalStateCleaner CLEANER = new FastThreadLocalStateCleaner(object -> { diff --git a/buildtools/src/main/java/org/apache/pulsar/tests/MockitoCleanupListener.java b/buildtools/src/main/java/org/apache/pulsar/tests/MockitoCleanupListener.java index 0724a9dc22261..354a55c10e39f 100644 --- a/buildtools/src/main/java/org/apache/pulsar/tests/MockitoCleanupListener.java +++ b/buildtools/src/main/java/org/apache/pulsar/tests/MockitoCleanupListener.java @@ -32,7 +32,7 @@ public class MockitoCleanupListener extends BetweenTestClassesListenerAdapter { private static final Logger LOG = LoggerFactory.getLogger(MockitoCleanupListener.class); private static final boolean - MOCKITO_CLEANUP_ENABLED = Boolean.valueOf(System.getProperty("testMockitoCleanup", "true")); + MOCKITO_CLEANUP_ENABLED = Boolean.parseBoolean(System.getProperty("testMockitoCleanup", "true")); @Override protected void onBetweenTestClasses(Class endedTestClass, Class startedTestClass) { diff --git a/buildtools/src/main/java/org/apache/pulsar/tests/ThreadLeakDetectorListener.java b/buildtools/src/main/java/org/apache/pulsar/tests/ThreadLeakDetectorListener.java index ddfebc9a44d9e..163591cf82554 100644 --- a/buildtools/src/main/java/org/apache/pulsar/tests/ThreadLeakDetectorListener.java +++ 
b/buildtools/src/main/java/org/apache/pulsar/tests/ThreadLeakDetectorListener.java @@ -32,9 +32,7 @@ */ public class ThreadLeakDetectorListener extends BetweenTestClassesListenerAdapter { private static final Logger LOG = LoggerFactory.getLogger(ThreadLeakDetectorListener.class); - private static final boolean - THREAD_LEAK_DETECTOR_ENABLED = Boolean.valueOf(System.getProperty("testThreadLeakDetector", - "true")); + private Set capturedThreadKeys; @Override diff --git a/conf/bookkeeper.conf b/conf/bookkeeper.conf index 2fdb0fd1ac9b2..7c9b627bbc783 100644 --- a/conf/bookkeeper.conf +++ b/conf/bookkeeper.conf @@ -166,6 +166,11 @@ maxPendingReadRequestsPerThread=2500 # avoid the executor queue to grow indefinitely maxPendingAddRequestsPerThread=10000 +# Use auto-throttling of the read-worker threads. This is done +# to ensure the bookie is not using unlimited amount of memory +# to respond to read-requests. +readWorkerThreadsThrottlingEnabled=true + # Option to enable busy-wait settings. Default is false. # WARNING: This option will enable spin-waiting on executors and IO threads in order to reduce latency during # context switches. The spinning will consume 100% CPU even when bookie is not doing any work. It is recommended to @@ -272,6 +277,17 @@ useV2WireProtocol=true # # ensemblePlacementPolicy=org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy +# The DNS resolver class used for resolving the network location of each bookie. All bookkeeper clients +# should be configured with the same value to ensure that each bookkeeper client builds +# the same network topology in order to then place ensembles consistently. The setting is used +# when using either RackawareEnsemblePlacementPolicy and RegionAwareEnsemblePlacementPolicy. +# The setting in this file is used to configure the bookkeeper client used in the bookkeeper, which +# is used by the autorecovery process. 
+# Some available options: +# - org.apache.pulsar.zookeeper.ZkBookieRackAffinityMapping (Pulsar default) +# - org.apache.bookkeeper.net.ScriptBasedMapping (Bookkeeper default) +reppDnsResolverClass=org.apache.pulsar.zookeeper.ZkBookieRackAffinityMapping + ############################################################################# ## Netty server settings ############################################################################# diff --git a/conf/broker.conf b/conf/broker.conf index 1fc578a287f6f..46413ad824f4d 100644 --- a/conf/broker.conf +++ b/conf/broker.conf @@ -270,6 +270,10 @@ brokerMaxConnections=0 # The maximum number of connections per IP. If it exceeds, new connections are rejected. brokerMaxConnectionsPerIp=0 +# Allow schema to be auto updated at broker level. User can override this by +# 'is_allow_auto_update_schema' of namespace policy. +isAllowAutoUpdateSchemaEnabled=true + # Enable check for minimum allowed client library version clientLibraryVersionCheckEnabled=false @@ -391,6 +395,15 @@ dispatcherMinReadBatchSize=1 # Max number of entries to dispatch for a shared subscription. By default it is 20 entries. dispatcherMaxRoundRobinBatchSize=20 +# The read failure backoff initial time in milliseconds. By default it is 15s. +dispatcherReadFailureBackoffInitialTimeInMs=15000 + +# The read failure backoff max time in milliseconds. By default it is 60s. +dispatcherReadFailureBackoffMaxTimeInMs=60000 + +# The read failure backoff mandatory stop time in milliseconds. By default it is 0s. +dispatcherReadFailureBackoffMandatoryStopTimeInMs=0 + # Precise dispathcer flow control according to history message number of each entry preciseDispatcherFlowControl=false @@ -474,9 +487,25 @@ delayedDeliveryEnabled=true # Control the tick time for when retrying on delayed delivery, # affecting the accuracy of the delivery time compared to the scheduled time. 
+# Note that this time is used to configure the HashedWheelTimer's tick time for the +# InMemoryDelayedDeliveryTrackerFactory (the default DelayedDeliverTrackerFactory). # Default is 1 second. delayedDeliveryTickTimeMillis=1000 +# When using the InMemoryDelayedDeliveryTrackerFactory (the default DelayedDeliverTrackerFactory), whether +# the deliverAt time is strictly followed. When false (default), messages may be sent to consumers before the deliverAt +# time by as much as the tickTimeMillis. This can reduce the overhead on the broker of maintaining the delayed index +# for a potentially very short time period. When true, messages will not be sent to consumer until the deliverAt time +# has passed, and they may be as late as the deliverAt time plus the tickTimeMillis for the topic plus the +# delayedDeliveryTickTimeMillis. +isDelayedDeliveryDeliverAtTimeStrict=false + +# Size of the lookahead window to use when detecting if all the messages in the topic +# have a fixed delay. +# Default is 50,000. Setting the lookahead window to 0 will disable the logic to handle +# fixed delays in messages in a different way. +delayedDeliveryFixedDelayDetectionLookahead=50000 + # Whether to enable acknowledge of batch local index. acknowledgmentAtBatchIndexLevelEnabled=false @@ -521,6 +550,9 @@ zookeeperSessionExpiredPolicy=shutdown # Enable or disable system topic systemTopicEnabled=false +# The schema compatibility strategy to use for system topics +systemTopicSchemaCompatibilityStrategy=ALWAYS_COMPATIBLE + # Enable or disable topic level policies, topic level policies depends on the system topic # Please enable the system topic first. topicLevelPoliciesEnabled=false @@ -578,13 +610,20 @@ tlsCiphers= # authentication. tlsRequireTrustedClientCertOnConnect=false +# Specify the TLS provider for the broker service: +# When using TLS authentication with CACert, the valid value is either OPENSSL or JDK. 
+# When using TLS authentication with KeyStore, available values can be SunJSSE, Conscrypt and etc. +tlsProvider= + +# Specify the TLS provider for the web service: SunJSSE, Conscrypt and etc. +webServiceTlsProvider=Conscrypt + ### --- KeyStore TLS config variables --- ### +## Note that some of the above TLS configs also apply to the KeyStore TLS configuration. + # Enable TLS with KeyStore type configuration in broker. tlsEnabledWithKeyStore=false -# TLS Provider for KeyStore type -tlsProvider= - # TLS KeyStore type configuration in broker: JKS, PKCS12 tlsKeyStoreType=JKS @@ -633,6 +672,9 @@ brokerClientTlsCiphers= # used by the internal client to authenticate with Pulsar brokers brokerClientTlsProtocols= +# You can add extra configuration options for the Pulsar Client and the Pulsar Admin Client +# by prefixing them with "brokerClient_". These configurations are applied after hard coded configuration +# and before the above brokerClient configurations named above. ### --- Authentication --- ### @@ -673,6 +715,13 @@ athenzDomainNames= # When this parameter is not empty, unauthenticated users perform as anonymousUserRole anonymousUserRole= +## Configure the datasource of basic authenticate, supports the file and Base64 format. +# file: +# basicAuthConf=/path/my/.htpasswd +# use Base64 to encode the contents of .htpasswd: +# basicAuthConf=YOUR-BASE64-DATA +basicAuthConf= + ### --- Token Authentication Provider --- ### ## Symmetric key @@ -708,7 +757,7 @@ saslJaasClientAllowedIds= # Service Principal, for login context name. # Default value `SaslConstants.JAAS_DEFAULT_BROKER_SECTION_NAME`, which is "Broker". 
-saslJaasBrokerSectionName= +saslJaasServerSectionName= ### --- HTTP Server config --- ### @@ -874,8 +923,11 @@ managedLedgerDefaultAckQuorum=2 # in case of lack of enough bookies #bookkeeper_opportunisticStriping=false -# you can add other configuration options for the BookKeeper client -# by prefixing them with bookkeeper_ +# You can add other configuration options for the BookKeeper client +# by prefixing them with "bookkeeper_". These configurations are applied +# to all bookkeeper clients started by the broker (including the managed ledger bookkeeper clients as well as +# the BookkeeperPackagesStorage bookkeeper client), except the distributed log bookkeeper client. +# The dlog bookkeeper client is configured in the functions worker configuration file. # How frequently to flush the cursor positions that were accumulated due to rate limiting. (seconds). # Default is 60 seconds @@ -1002,6 +1054,9 @@ loadBalancerEnabled=true # Percentage of change to trigger load report update loadBalancerReportUpdateThresholdPercentage=10 +# minimum interval to update load report +loadBalancerReportUpdateMinIntervalMillis=5000 + # maximum interval to update load report loadBalancerReportUpdateMaxIntervalMinutes=15 @@ -1216,12 +1271,10 @@ schemaRegistryStorageClassName=org.apache.pulsar.broker.service.schema.Bookkeepe # if you enable this setting, it will cause non-java clients failed to produce. isSchemaValidationEnforced=false -# The schema compatibility strategy in broker level. If this config in namespace policy is `UNDEFINED`, -# broker will use it in broker level. If schemaCompatibilityStrategy is `UNDEFINED` will use `FULL`. -# SchemaCompatibilityStrategy : UNDEFINED, ALWAYS_INCOMPATIBLE, ALWAYS_COMPATIBLE, BACKWARD, FORWARD, +# The schema compatibility strategy in broker level. 
+# SchemaCompatibilityStrategy : ALWAYS_INCOMPATIBLE, ALWAYS_COMPATIBLE, BACKWARD, FORWARD, # FULL, BACKWARD_TRANSITIVE, FORWARD_TRANSITIVE, FULL_TRANSITIVE -# default : UNDEFINED -schemaCompatibilityStrategy= +schemaCompatibilityStrategy=FULL ### --- Ledger Offloading --- ### @@ -1304,6 +1357,15 @@ transactionBufferSnapshotMaxTransactionCount=1000 # Unit : millisecond transactionBufferSnapshotMinTimeInMillis=5000 +# The max concurrent requests for transaction buffer client, default is 1000 +transactionBufferClientMaxConcurrentRequests=1000 + +# MLPendingAckStore maintains a ConcurrentSkipListMap pendingAckLogIndex, +# It stores the position in pendingAckStore as its value and saves a position used to determine +# whether the previous data can be cleaned up as a key. +# transactionPendingAckLogIndexMinLag is used to configure the minimum lag between indexes +transactionPendingAckLogIndexMinLag=500 + ### --- Packages management service configuration variables (begin) --- ### # Enable the packages management service or not @@ -1319,4 +1381,8 @@ packagesReplicas=1 # The bookkeeper ledger root path packagesManagementLedgerRootPath=/ledgers +# When using BookKeeperPackagesStorageProvider, you can configure the +# bookkeeper client by prefixing configurations with "bookkeeper_". +# This config applies to managed ledger bookkeeper clients, as well. + ### --- Packages management service configuration variables (end) --- ### diff --git a/conf/functions_worker.yml b/conf/functions_worker.yml index 58dfc692617d3..a0449cbb2365b 100644 --- a/conf/functions_worker.yml +++ b/conf/functions_worker.yml @@ -206,6 +206,7 @@ functionRuntimeFactoryConfigs: # # The port inside the function pod which is used by the worker to communicate with the pod # grpcPort: 9093 # # The port inside the function pod on which prometheus metrics are exposed +# # An empty value disables prometheus metrics. 
# metricsPort: 9094 # # The directory inside the function pod where nar packages will be extracted # narExtractionDirectory: @@ -216,6 +217,10 @@ functionRuntimeFactoryConfigs: # extraFunctionDependenciesDir: # # Additional memory padding added on top of the memory requested by the function per on a per instance basis # percentMemoryPadding: 10 +# # The duration in seconds before the StatefulSet deleted on function stop/restart. +# # Value must be non-negative integer. The value zero indicates delete immediately. +# # Default is 5 seconds. +# gracePeriodSeconds: 5 ## A set of the minimum amount of resources functions must request. ## Support for this depends on function runtime. @@ -296,6 +301,46 @@ tlsAllowInsecureConnection: false tlsEnableHostnameVerification: false # Tls cert refresh duration in seconds (set 0 to check on every new connection) tlsCertRefreshCheckDurationSec: 300 +# Whether client certificates are required for TLS. Connections are rejected if the client +# certificate isn't trusted. +tlsRequireTrustedClientCertOnConnect: false + +### --- TLS config variables --- ### +## Note that some of the above TLS configs also apply to the KeyStore TLS configuration. + +# Specify the TLS provider for the web service: SunJSSE, Conscrypt and etc. +tlsProvider: Conscrypt + +# Enable TLS with KeyStore type configuration in function worker. 
+tlsEnabledWithKeyStore: false + +# TLS KeyStore type configuration in function worker: JKS, PKCS12 +tlsKeyStoreType: JKS + +# TLS KeyStore path in function worker +tlsKeyStore: + +# TLS KeyStore password for function worker +tlsKeyStorePassword: + +# TLS TrustStore type configuration in function worker: JKS, PKCS12 +tlsTrustStoreType: JKS + +# TLS TrustStore path in function worker +tlsTrustStore: + +# TLS TrustStore password in function worker, default value is empty password +tlsTrustStorePassword: + +# Specify the tls protocols the function worker's web service will use to negotiate during TLS handshake +# (a comma-separated list of protocol names). +# Examples:- [TLSv1.3, TLSv1.2] +webServiceTlsProtocols: + +# Specify the tls cipher the function worker will use to negotiate during TLS Handshake +# (a comma-separated list of ciphers). +# Examples:- [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256] +webServiceTlsCiphers: ######################## # State Management @@ -317,3 +362,23 @@ validateConnectorConfig: false # Whether to initialize distributed log metadata by runtime. # If it is set to true, you must ensure that it has been initialized by "bin/pulsar initialize-cluster-metadata" command. initializedDlogMetadata: false +########################### +# Arbitrary Configuration +########################### +# When a configuration parameter is not explicitly named in the WorkerConfig class, it is only accessible from the +# properties map. This map can be configured by supplying values to the properties map in this config file. + +# Configure the DLog bookkeeper client by prefixing configurations with "bookkeeper_". Because these are arbitrary, they +# must be added to the properties map to get correctly applied. This configuration applies to the Dlog bookkeeper client +# in both the standalone function workers and function workers initialized in the broker. 
+ +# You can add extra configuration options for the Pulsar Client and the Pulsar Admin Client +# by prefixing them with "brokerClient_". These configurations are applied after hard coded configuration +# and before the above brokerClient configurations named above. + +## For example, when using the token authentication provider (AuthenticationProviderToken), you must configure several +## custom configurations. Here is a sample for configuring one of the necessary configs: +#properties: +# tokenPublicKey: "file:///path/to/my/key" +# tokenPublicAlg: "RSA256" + diff --git a/conf/log4j2.yaml b/conf/log4j2.yaml index a76d58e804f96..71698d7206787 100644 --- a/conf/log4j2.yaml +++ b/conf/log4j2.yaml @@ -33,7 +33,7 @@ Configuration: - name: "pulsar.log.appender" value: "RoutingAppender" - name: "pulsar.log.root.level" - value: "debug" + value: "info" - name: "pulsar.log.level" value: "info" - name: "pulsar.routing.appender.default" diff --git a/conf/presto/catalog/pulsar.properties b/conf/presto/catalog/pulsar.properties index 1f5a89a8b7cc7..e273b98dccc95 100644 --- a/conf/presto/catalog/pulsar.properties +++ b/conf/presto/catalog/pulsar.properties @@ -42,7 +42,8 @@ pulsar.max-split-queue-cache-size=-1 # to prevent erroneous rewriting pulsar.namespace-delimiter-rewrite-enable=false pulsar.rewrite-namespace-delimiter=/ - +# max size of one batch message (default value is 5MB) +# pulsar.max-message-size=5242880 ####### TIERED STORAGE OFFLOADER CONFIGS ####### diff --git a/conf/presto/jvm.config b/conf/presto/jvm.config index 4a431b63367e3..3e1cdd6a24bc7 100644 --- a/conf/presto/jvm.config +++ b/conf/presto/jvm.config @@ -27,3 +27,4 @@ -XX:+ExitOnOutOfMemoryError -Dpresto-temporarily-allow-java8=true -Djdk.attach.allowAttachSelf=true +-javaagent:java-version-trim-agent.jar \ No newline at end of file diff --git a/conf/proxy.conf b/conf/proxy.conf index fbb009878e315..77ab31b80cf4e 100644 --- a/conf/proxy.conf +++ b/conf/proxy.conf @@ -68,6 +68,44 @@ webServicePort=8080 # 
Port to use to server HTTPS request webServicePortTls= +# Number of threads used for Netty IO. Default is set to `2 * Runtime.getRuntime().availableProcessors()` +numIOThreads= + +# Number of threads used for Netty Acceptor. Default is set to `1` +numAcceptorThreads= + +### --- TLS config variables --- ### +## Note that some of the above TLS configs also apply to the KeyStore TLS configuration. + +# Specify the TLS provider for the broker service: +# When using TLS authentication with CACert, the valid value is either OPENSSL or JDK. +# When using TLS authentication with KeyStore, available values can be SunJSSE, Conscrypt and etc. +tlsProvider= + +# Specify the TLS provider for the web service, available values can be SunJSSE, Conscrypt and etc. +webServiceTlsProvider=Conscrypt + +# Enable TLS with KeyStore type configuration in proxy. +tlsEnabledWithKeyStore=false + +# TLS KeyStore type configuration in proxy: JKS, PKCS12 +tlsKeyStoreType=JKS + +# TLS KeyStore path in proxy +tlsKeyStore= + +# TLS KeyStore password for proxy +tlsKeyStorePassword= + +# TLS TrustStore type configuration in proxy: JKS, PKCS12 +tlsTrustStoreType=JKS + +# TLS TrustStore path in proxy +tlsTrustStore= + +# TLS TrustStore password in proxy, default value is empty password +tlsTrustStorePassword= + # Path for the file used to determine the rotation status for the proxy instance when responding # to service discovery health checks statusFilePath= @@ -125,6 +163,10 @@ tlsEnabledWithBroker=false # Tls cert refresh duration in seconds (set 0 to check on every new connection) tlsCertRefreshCheckDurationSec=300 +# You can add extra configuration options for the Pulsar Client +# by prefixing them with "brokerClient_". These configurations are applied after hard coded configuration +# and before the above brokerClient configurations named above. + ##### --- Rate Limiting --- ##### # Max concurrent inbound connections. The proxy will reject requests beyond that. 
@@ -196,6 +238,13 @@ httpRequestsLimitEnabled=false httpRequestsMaxPerSecond=100.0 +## Configure the datasource of basic authenticate, supports the file and Base64 format. +# file: +# basicAuthConf=/path/my/.htpasswd +# use Base64 to encode the contents of .htpasswd: +# basicAuthConf=YOUR-BASE64-DATA +basicAuthConf= + ### --- Token Authentication Provider --- ### ## Symmetric key diff --git a/conf/pulsar_tools_env.sh b/conf/pulsar_tools_env.sh index 0f14977954971..80f8d3f1d160f 100755 --- a/conf/pulsar_tools_env.sh +++ b/conf/pulsar_tools_env.sh @@ -42,10 +42,10 @@ # PULSAR_GLOBAL_ZK_CONF= # Extra options to be passed to the jvm -PULSAR_MEM="-Xmx128m -XX:MaxDirectMemorySize=128m" +PULSAR_MEM=${PULSAR_MEM:-"-Xmx128m -XX:MaxDirectMemorySize=128m"} # Garbage collection options -PULSAR_GC=" -client " +PULSAR_GC=${PULSAR_GC:-" -client "} # Extra options to be passed to the jvm PULSAR_EXTRA_OPTS="${PULSAR_EXTRA_OPTS} ${PULSAR_MEM} ${PULSAR_GC} ${PULSAR_GC_LOG} -Dio.netty.leakDetectionLevel=disabled" diff --git a/conf/standalone.conf b/conf/standalone.conf index 2e0273a2f3ea0..67c0c6b89fa2d 100644 --- a/conf/standalone.conf +++ b/conf/standalone.conf @@ -176,6 +176,10 @@ defaultNumberOfNamespaceBundles=4 # Using a value of 0, is disabling maxTopicsPerNamespace-limit check. maxTopicsPerNamespace=0 +# Allow schema to be auto updated at broker level. User can override this by +# 'is_allow_auto_update_schema' of namespace policy. +isAllowAutoUpdateSchemaEnabled=true + # Enable check for minimum allowed client library version clientLibraryVersionCheckEnabled=false @@ -243,6 +247,15 @@ dispatchThrottlingRateRelativeToPublishRate=false # backlog. dispatchThrottlingOnNonBacklogConsumerEnabled=true +# The read failure backoff initial time in milliseconds. By default it is 15s. +dispatcherReadFailureBackoffInitialTimeInMs=15000 + +# The read failure backoff max time in milliseconds. By default it is 60s. 
+dispatcherReadFailureBackoffMaxTimeInMs=60000 + +# The read failure backoff mandatory stop time in milliseconds. By default it is 0s. +dispatcherReadFailureBackoffMandatoryStopTimeInMs=0 + # Precise dispathcer flow control according to history message number of each entry preciseDispatcherFlowControl=false @@ -344,13 +357,18 @@ tlsCiphers= # authentication. tlsRequireTrustedClientCertOnConnect=false +# Specify the TLS provider for the broker service: +# When using TLS authentication with CACert, the valid value is either OPENSSL or JDK. +# When using TLS authentication with KeyStore, available values can be SunJSSE, Conscrypt and etc. +tlsProvider= + +# Specify the TLS provider for the web service: SunJSSE, Conscrypt and etc. +webServiceTlsProvider=Conscrypt + ### --- KeyStore TLS config variables --- ### # Enable TLS with KeyStore type configuration in broker. tlsEnabledWithKeyStore=false -# TLS Provider for KeyStore type -tlsProvider= - # TLS KeyStore type configuration in broker: JKS, PKCS12 tlsKeyStoreType=JKS @@ -451,6 +469,12 @@ athenzDomainNames= # When this parameter is not empty, unauthenticated users perform as anonymousUserRole anonymousUserRole= +## Configure the datasource of basic authenticate, supports the file and Base64 format. 
+# file: +# basicAuthConf=/path/my/.htpasswd +# use Base64 to encode the contents of .htpasswd: +# basicAuthConf=YOUR-BASE64-DATA +basicAuthConf= ### --- Token Authentication Provider --- ### @@ -724,6 +748,9 @@ loadBalancerEnabled=false # Percentage of change to trigger load report update loadBalancerReportUpdateThresholdPercentage=10 +# minimum interval to update load report +loadBalancerReportUpdateMinIntervalMillis=5000 + # maximum interval to update load report loadBalancerReportUpdateMaxIntervalMinutes=15 diff --git a/conf/websocket.conf b/conf/websocket.conf index 814e58753ef7b..4fe6f7e37b6d8 100644 --- a/conf/websocket.conf +++ b/conf/websocket.conf @@ -92,10 +92,15 @@ brokerClientAuthenticationPlugin= brokerClientAuthenticationParameters= brokerClientTrustCertsFilePath= +# You can add extra configuration options for the Pulsar Client +# by prefixing them with "brokerClient_". These configurations are applied after hard coded configuration +# and before the above brokerClient configurations named above. + # When this parameter is not empty, unauthenticated users perform as anonymousUserRole anonymousUserRole= ### --- TLS --- ### +## Note that some of the above TLS configs also apply to the KeyStore TLS configuration. # Deprecated - use webServicePortTls and brokerClientTlsEnabled instead tlsEnabled=false @@ -119,6 +124,40 @@ tlsRequireTrustedClientCertOnConnect=false # Tls cert refresh duration in seconds (set 0 to check on every new connection) tlsCertRefreshCheckDurationSec=300 +# Specify the TLS provider for the WebSocket: SunJSSE, Conscrypt and etc. +tlsProvider=Conscrypt + +# Enable TLS with KeyStore type configuration in WebSocket. 
+tlsEnabledWithKeyStore=false + +# TLS KeyStore type configuration in WebSocket: JKS, PKCS12 +tlsKeyStoreType=JKS + +# TLS KeyStore path in WebSocket +tlsKeyStore= + +# TLS KeyStore password for WebSocket +tlsKeyStorePassword= + +# TLS TrustStore type configuration in WebSocket: JKS, PKCS12 +tlsTrustStoreType=JKS + +# TLS TrustStore path in WebSocket +tlsTrustStore= + +# TLS TrustStore password in WebSocket, default value is empty password +tlsTrustStorePassword= + +# Specify the tls protocols the proxy's web service will use to negotiate during TLS handshake +# (a comma-separated list of protocol names). +# Examples:- [TLSv1.3, TLSv1.2] +webServiceTlsProtocols= + +# Specify the tls cipher the proxy will use to negotiate during TLS Handshake +# (a comma-separated list of ciphers). +# Examples:- [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256] +webServiceTlsCiphers= + ### --- Deprecated config variables --- ### # Deprecated. Use configurationStoreServers diff --git a/deployment/terraform-ansible/deploy-pulsar.yaml b/deployment/terraform-ansible/deploy-pulsar.yaml index c016d548e6af1..db180ddfeb94c 100644 --- a/deployment/terraform-ansible/deploy-pulsar.yaml +++ b/deployment/terraform-ansible/deploy-pulsar.yaml @@ -39,7 +39,7 @@ zookeeper_servers: "{{ groups['zookeeper']|map('extract', hostvars, ['ansible_default_ipv4', 'address'])|map('regex_replace', '^(.*)$', '\\1:2181') | join(',') }}" service_url: "{{ pulsar_service_url }}" http_url: "{{ pulsar_web_url }}" - pulsar_version: "2.8.1" + pulsar_version: "2.9.3" - name: Download Pulsar binary package unarchive: src: https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=pulsar/pulsar-{{ pulsar_version }}/apache-pulsar-{{ pulsar_version }}-bin.tar.gz diff --git a/deployment/terraform-ansible/templates/broker.conf b/deployment/terraform-ansible/templates/broker.conf index 9936105c21095..39adc6b0a2076 100644 --- a/deployment/terraform-ansible/templates/broker.conf +++ 
b/deployment/terraform-ansible/templates/broker.conf @@ -613,7 +613,7 @@ saslJaasClientAllowedIds= # Service Principal, for login context name. # Default value `SaslConstants.JAAS_DEFAULT_BROKER_SECTION_NAME`, which is "Broker". -saslJaasBrokerSectionName= +saslJaasServerSectionName= ### --- HTTP Server config --- ### diff --git a/distribution/io/pom.xml b/distribution/io/pom.xml index 86a8bbdaeb479..a1a63301483ea 100644 --- a/distribution/io/pom.xml +++ b/distribution/io/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar distribution - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/distribution/offloaders/pom.xml b/distribution/offloaders/pom.xml index 352ed71e6d868..07bc62ffb7603 100644 --- a/distribution/offloaders/pom.xml +++ b/distribution/offloaders/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar distribution - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/distribution/pom.xml b/distribution/pom.xml index 381e7194a68df..4959dd90662de 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/distribution/server/pom.xml b/distribution/server/pom.xml index 72aab71e6eb80..eb8c3577724b3 100644 --- a/distribution/server/pom.xml +++ b/distribution/server/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar distribution - 2.9.0-SNAPSHOT + 2.9.3 .. 
@@ -100,11 +100,6 @@ ${project.version} - - org.apache.logging.log4j - log4j-1.2-api - - org.apache.logging.log4j log4j-api diff --git a/distribution/server/src/assemble/LICENSE.bin.txt b/distribution/server/src/assemble/LICENSE.bin.txt index 6da3b3a47d91e..5296c8002838f 100644 --- a/distribution/server/src/assemble/LICENSE.bin.txt +++ b/distribution/server/src/assemble/LICENSE.bin.txt @@ -312,27 +312,27 @@ The Apache Software License, Version 2.0 * JCommander -- com.beust-jcommander-1.78.jar * High Performance Primitive Collections for Java -- com.carrotsearch-hppc-0.7.3.jar * Jackson - - com.fasterxml.jackson.core-jackson-annotations-2.12.3.jar - - com.fasterxml.jackson.core-jackson-core-2.12.3.jar - - com.fasterxml.jackson.core-jackson-databind-2.12.3.jar - - com.fasterxml.jackson.dataformat-jackson-dataformat-yaml-2.12.3.jar - - com.fasterxml.jackson.jaxrs-jackson-jaxrs-base-2.12.3.jar - - com.fasterxml.jackson.jaxrs-jackson-jaxrs-json-provider-2.12.3.jar - - com.fasterxml.jackson.module-jackson-module-jaxb-annotations-2.12.3.jar - - com.fasterxml.jackson.module-jackson-module-jsonSchema-2.12.3.jar + - com.fasterxml.jackson.core-jackson-annotations-2.13.4.jar + - com.fasterxml.jackson.core-jackson-core-2.13.4.jar + - com.fasterxml.jackson.core-jackson-databind-2.13.4.jar + - com.fasterxml.jackson.dataformat-jackson-dataformat-yaml-2.13.4.jar + - com.fasterxml.jackson.jaxrs-jackson-jaxrs-base-2.13.4.jar + - com.fasterxml.jackson.jaxrs-jackson-jaxrs-json-provider-2.13.4.jar + - com.fasterxml.jackson.module-jackson-module-jaxb-annotations-2.13.4.jar + - com.fasterxml.jackson.module-jackson-module-jsonSchema-2.13.4.jar * Caffeine -- com.github.ben-manes.caffeine-caffeine-2.9.1.jar * Conscrypt -- org.conscrypt-conscrypt-openjdk-uber-2.5.2.jar - * Proto Google Common Protos -- com.google.api.grpc-proto-google-common-protos-1.17.0.jar + * Proto Google Common Protos -- com.google.api.grpc-proto-google-common-protos-2.0.1.jar * Bitbucket -- 
org.bitbucket.b_c-jose4j-0.7.6.jar * Gson - - com.google.code.gson-gson-2.8.6.jar + - com.google.code.gson-gson-2.8.9.jar - io.gsonfire-gson-fire-1.8.5.jar * Guava - com.google.guava-guava-30.1-jre.jar - com.google.guava-failureaccess-1.0.1.jar - com.google.guava-listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar * J2ObjC Annotations -- com.google.j2objc-j2objc-annotations-1.3.jar - * Netty Reactive Streams -- com.typesafe.netty-netty-reactive-streams-2.0.4.jar + * Netty Reactive Streams -- com.typesafe.netty-netty-reactive-streams-2.0.6.jar * Swagger - io.swagger-swagger-annotations-1.6.2.jar - io.swagger-swagger-core-1.6.2.jar @@ -341,7 +341,7 @@ The Apache Software License, Version 2.0 - com.yahoo.datasketches-memory-0.8.3.jar - com.yahoo.datasketches-sketches-core-0.8.3.jar * Apache Commons - - commons-cli-commons-cli-1.2.jar + - commons-cli-commons-cli-1.5.0.jar - commons-codec-commons-codec-1.15.jar - commons-collections-commons-collections-3.2.2.jar - commons-configuration-commons-configuration-1.10.jar @@ -352,24 +352,31 @@ The Apache Software License, Version 2.0 - org.apache.commons-commons-compress-1.21.jar - org.apache.commons-commons-lang3-3.11.jar * Netty - - io.netty-netty-buffer-4.1.68.Final.jar - - io.netty-netty-codec-4.1.68.Final.jar - - io.netty-netty-codec-dns-4.1.68.Final.jar - - io.netty-netty-codec-http-4.1.68.Final.jar - - io.netty-netty-codec-http2-4.1.68.Final.jar - - io.netty-netty-codec-socks-4.1.68.Final.jar - - io.netty-netty-codec-haproxy-4.1.68.Final.jar - - io.netty-netty-common-4.1.68.Final.jar - - io.netty-netty-handler-4.1.68.Final.jar - - io.netty-netty-handler-proxy-4.1.68.Final.jar - - io.netty-netty-resolver-4.1.68.Final.jar - - io.netty-netty-resolver-dns-4.1.68.Final.jar - - io.netty-netty-transport-4.1.68.Final.jar - - io.netty-netty-transport-native-epoll-4.1.68.Final-linux-x86_64.jar - - io.netty-netty-transport-native-epoll-4.1.68.Final.jar - - io.netty-netty-transport-native-unix-common-4.1.68.Final.jar - 
- io.netty-netty-transport-native-unix-common-4.1.68.Final-linux-x86_64.jar - - io.netty-netty-tcnative-boringssl-static-2.0.42.Final.jar + - io.netty-netty-buffer-4.1.77.Final.jar + - io.netty-netty-codec-4.1.77.Final.jar + - io.netty-netty-codec-dns-4.1.77.Final.jar + - io.netty-netty-codec-http-4.1.77.Final.jar + - io.netty-netty-codec-http2-4.1.77.Final.jar + - io.netty-netty-codec-socks-4.1.77.Final.jar + - io.netty-netty-codec-haproxy-4.1.77.Final.jar + - io.netty-netty-common-4.1.77.Final.jar + - io.netty-netty-handler-4.1.77.Final.jar + - io.netty-netty-handler-proxy-4.1.77.Final.jar + - io.netty-netty-resolver-4.1.77.Final.jar + - io.netty-netty-resolver-dns-4.1.77.Final.jar + - io.netty-netty-transport-4.1.77.Final.jar + - io.netty-netty-transport-classes-epoll-4.1.77.Final.jar + - io.netty-netty-transport-native-epoll-4.1.77.Final-linux-x86_64.jar + - io.netty-netty-transport-native-epoll-4.1.77.Final.jar + - io.netty-netty-transport-native-unix-common-4.1.77.Final.jar + - io.netty-netty-transport-native-unix-common-4.1.77.Final-linux-x86_64.jar + - io.netty-netty-tcnative-boringssl-static-2.0.52.Final.jar + - io.netty-netty-tcnative-boringssl-static-2.0.52.Final-linux-aarch_64.jar + - io.netty-netty-tcnative-boringssl-static-2.0.52.Final-linux-x86_64.jar + - io.netty-netty-tcnative-boringssl-static-2.0.52.Final-osx-aarch_64.jar + - io.netty-netty-tcnative-boringssl-static-2.0.52.Final-osx-x86_64.jar + - io.netty-netty-tcnative-boringssl-static-2.0.52.Final-windows-x86_64.jar + - io.netty-netty-tcnative-classes-2.0.52.Final.jar * Prometheus client - io.prometheus-simpleclient-0.5.0.jar - io.prometheus-simpleclient_common-0.5.0.jar @@ -383,38 +390,37 @@ The Apache Software License, Version 2.0 - jakarta.validation-jakarta.validation-api-2.0.2.jar - javax.validation-validation-api-1.1.0.Final.jar * Log4J - - org.apache.logging.log4j-log4j-api-2.14.0.jar - - org.apache.logging.log4j-log4j-core-2.14.0.jar - - 
org.apache.logging.log4j-log4j-slf4j-impl-2.14.0.jar - - org.apache.logging.log4j-log4j-web-2.14.0.jar - - org.apache.logging.log4j-log4j-1.2-api-2.14.0.jar + - org.apache.logging.log4j-log4j-api-2.18.0.jar + - org.apache.logging.log4j-log4j-core-2.18.0.jar + - org.apache.logging.log4j-log4j-slf4j-impl-2.18.0.jar + - org.apache.logging.log4j-log4j-web-2.18.0.jar * Java Native Access JNA -- net.java.dev.jna-jna-4.2.0.jar * BookKeeper - - org.apache.bookkeeper-bookkeeper-common-4.14.2.jar - - org.apache.bookkeeper-bookkeeper-common-allocator-4.14.2.jar - - org.apache.bookkeeper-bookkeeper-proto-4.14.2.jar - - org.apache.bookkeeper-bookkeeper-server-4.14.2.jar - - org.apache.bookkeeper-bookkeeper-tools-framework-4.14.2.jar - - org.apache.bookkeeper-circe-checksum-4.14.2.jar - - org.apache.bookkeeper-cpu-affinity-4.14.2.jar - - org.apache.bookkeeper-statelib-4.14.2.jar - - org.apache.bookkeeper-stream-storage-api-4.14.2.jar - - org.apache.bookkeeper-stream-storage-common-4.14.2.jar - - org.apache.bookkeeper-stream-storage-java-client-4.14.2.jar - - org.apache.bookkeeper-stream-storage-java-client-base-4.14.2.jar - - org.apache.bookkeeper-stream-storage-proto-4.14.2.jar - - org.apache.bookkeeper-stream-storage-server-4.14.2.jar - - org.apache.bookkeeper-stream-storage-service-api-4.14.2.jar - - org.apache.bookkeeper-stream-storage-service-impl-4.14.2.jar - - org.apache.bookkeeper.http-http-server-4.14.2.jar - - org.apache.bookkeeper.http-vertx-http-server-4.14.2.jar - - org.apache.bookkeeper.stats-bookkeeper-stats-api-4.14.2.jar - - org.apache.bookkeeper.stats-prometheus-metrics-provider-4.14.2.jar - - org.apache.distributedlog-distributedlog-common-4.14.2.jar - - org.apache.distributedlog-distributedlog-core-4.14.2-tests.jar - - org.apache.distributedlog-distributedlog-core-4.14.2.jar - - org.apache.distributedlog-distributedlog-protocol-4.14.2.jar - - org.apache.bookkeeper.stats-codahale-metrics-provider-4.14.2.jar + - 
org.apache.bookkeeper-bookkeeper-common-4.14.5.jar + - org.apache.bookkeeper-bookkeeper-common-allocator-4.14.5.jar + - org.apache.bookkeeper-bookkeeper-proto-4.14.5.jar + - org.apache.bookkeeper-bookkeeper-server-4.14.5.jar + - org.apache.bookkeeper-bookkeeper-tools-framework-4.14.5.jar + - org.apache.bookkeeper-circe-checksum-4.14.5.jar + - org.apache.bookkeeper-cpu-affinity-4.14.5.jar + - org.apache.bookkeeper-statelib-4.14.5.jar + - org.apache.bookkeeper-stream-storage-api-4.14.5.jar + - org.apache.bookkeeper-stream-storage-common-4.14.5.jar + - org.apache.bookkeeper-stream-storage-java-client-4.14.5.jar + - org.apache.bookkeeper-stream-storage-java-client-base-4.14.5.jar + - org.apache.bookkeeper-stream-storage-proto-4.14.5.jar + - org.apache.bookkeeper-stream-storage-server-4.14.5.jar + - org.apache.bookkeeper-stream-storage-service-api-4.14.5.jar + - org.apache.bookkeeper-stream-storage-service-impl-4.14.5.jar + - org.apache.bookkeeper.http-http-server-4.14.5.jar + - org.apache.bookkeeper.http-vertx-http-server-4.14.5.jar + - org.apache.bookkeeper.stats-bookkeeper-stats-api-4.14.5.jar + - org.apache.bookkeeper.stats-prometheus-metrics-provider-4.14.5.jar + - org.apache.distributedlog-distributedlog-common-4.14.5.jar + - org.apache.distributedlog-distributedlog-core-4.14.5-tests.jar + - org.apache.distributedlog-distributedlog-core-4.14.5.jar + - org.apache.distributedlog-distributedlog-protocol-4.14.5.jar + - org.apache.bookkeeper.stats-codahale-metrics-provider-4.14.5.jar * Apache HTTP Client - org.apache.httpcomponents-httpclient-4.5.13.jar - org.apache.httpcomponents-httpcore-4.4.13.jar @@ -424,54 +430,61 @@ The Apache Software License, Version 2.0 - org.asynchttpclient-async-http-client-2.12.1.jar - org.asynchttpclient-async-http-client-netty-utils-2.12.1.jar * Jetty - - org.eclipse.jetty-jetty-client-9.4.43.v20210629.jar - - org.eclipse.jetty-jetty-continuation-9.4.43.v20210629.jar - - org.eclipse.jetty-jetty-http-9.4.43.v20210629.jar - - 
org.eclipse.jetty-jetty-io-9.4.43.v20210629.jar - - org.eclipse.jetty-jetty-proxy-9.4.43.v20210629.jar - - org.eclipse.jetty-jetty-security-9.4.43.v20210629.jar - - org.eclipse.jetty-jetty-server-9.4.43.v20210629.jar - - org.eclipse.jetty-jetty-servlet-9.4.43.v20210629.jar - - org.eclipse.jetty-jetty-servlets-9.4.43.v20210629.jar - - org.eclipse.jetty-jetty-util-9.4.43.v20210629.jar - - org.eclipse.jetty-jetty-util-ajax-9.4.43.v20210629.jar - - org.eclipse.jetty.websocket-javax-websocket-client-impl-9.4.43.v20210629.jar - - org.eclipse.jetty.websocket-websocket-api-9.4.43.v20210629.jar - - org.eclipse.jetty.websocket-websocket-client-9.4.43.v20210629.jar - - org.eclipse.jetty.websocket-websocket-common-9.4.43.v20210629.jar - - org.eclipse.jetty.websocket-websocket-server-9.4.43.v20210629.jar - - org.eclipse.jetty.websocket-websocket-servlet-9.4.43.v20210629.jar - - org.eclipse.jetty-jetty-alpn-conscrypt-server-9.4.43.v20210629.jar - - org.eclipse.jetty-jetty-alpn-server-9.4.43.v20210629.jar - * SnakeYaml -- org.yaml-snakeyaml-1.27.jar - * RocksDB - org.rocksdb-rocksdbjni-6.10.2.jar + - org.eclipse.jetty-jetty-client-9.4.48.v20220622.jar + - org.eclipse.jetty-jetty-continuation-9.4.48.v20220622.jar + - org.eclipse.jetty-jetty-http-9.4.48.v20220622.jar + - org.eclipse.jetty-jetty-io-9.4.48.v20220622.jar + - org.eclipse.jetty-jetty-proxy-9.4.48.v20220622.jar + - org.eclipse.jetty-jetty-security-9.4.48.v20220622.jar + - org.eclipse.jetty-jetty-server-9.4.48.v20220622.jar + - org.eclipse.jetty-jetty-servlet-9.4.48.v20220622.jar + - org.eclipse.jetty-jetty-servlets-9.4.48.v20220622.jar + - org.eclipse.jetty-jetty-util-9.4.48.v20220622.jar + - org.eclipse.jetty-jetty-util-ajax-9.4.48.v20220622.jar + - org.eclipse.jetty.websocket-javax-websocket-client-impl-9.4.48.v20220622.jar + - org.eclipse.jetty.websocket-websocket-api-9.4.48.v20220622.jar + - org.eclipse.jetty.websocket-websocket-client-9.4.48.v20220622.jar + - 
org.eclipse.jetty.websocket-websocket-common-9.4.48.v20220622.jar + - org.eclipse.jetty.websocket-websocket-server-9.4.48.v20220622.jar + - org.eclipse.jetty.websocket-websocket-servlet-9.4.48.v20220622.jar + - org.eclipse.jetty-jetty-alpn-conscrypt-server-9.4.48.v20220622.jar + - org.eclipse.jetty-jetty-alpn-server-9.4.48.v20220622.jar + * SnakeYaml -- org.yaml-snakeyaml-1.31.jar + * RocksDB - org.rocksdb-rocksdbjni-6.16.4.jar * Google Error Prone Annotations - com.google.errorprone-error_prone_annotations-2.5.1.jar - * Apache Thrifth - org.apache.thrift-libthrift-0.14.2.jar + * Apache Thrift - org.apache.thrift-libthrift-0.14.2.jar * OkHttp3 - - com.squareup.okhttp3-logging-interceptor-3.14.9.jar - - com.squareup.okhttp3-okhttp-3.14.9.jar - * Okio - com.squareup.okio-okio-1.17.2.jar + - com.squareup.okhttp3-logging-interceptor-4.9.3.jar + - com.squareup.okhttp3-okhttp-4.9.3.jar + * Okio - com.squareup.okio-okio-2.8.0.jar * Javassist -- org.javassist-javassist-3.25.0-GA.jar + * Kotlin Standard Lib + - org.jetbrains.kotlin-kotlin-stdlib-1.4.32.jar + - org.jetbrains.kotlin-kotlin-stdlib-common-1.4.32.jar + - org.jetbrains.kotlin-kotlin-stdlib-jdk7-1.4.32.jar + - org.jetbrains.kotlin-kotlin-stdlib-jdk8-1.4.32.jar + - org.jetbrains-annotations-13.0.jar * gRPC - - io.grpc-grpc-all-1.33.0.jar - - io.grpc-grpc-auth-1.33.0.jar - - io.grpc-grpc-context-1.33.0.jar - - io.grpc-grpc-core-1.33.0.jar - - io.grpc-grpc-netty-1.33.0.jar - - io.grpc-grpc-protobuf-1.33.0.jar - - io.grpc-grpc-protobuf-lite-1.33.0.jar - - io.grpc-grpc-stub-1.33.0.jar - - io.grpc-grpc-alts-1.33.0.jar - - io.grpc-grpc-api-1.33.0.jar - - io.grpc-grpc-grpclb-1.33.0.jar - - io.grpc-grpc-netty-shaded-1.33.0.jar - - io.grpc-grpc-services-1.33.0.jar - - io.grpc-grpc-xds-1.33.0.jar + - io.grpc-grpc-all-1.45.1.jar + - io.grpc-grpc-auth-1.45.1.jar + - io.grpc-grpc-context-1.45.1.jar + - io.grpc-grpc-core-1.45.1.jar + - io.grpc-grpc-netty-1.45.1.jar + - io.grpc-grpc-protobuf-1.45.1.jar + - 
io.grpc-grpc-protobuf-lite-1.45.1.jar + - io.grpc-grpc-stub-1.45.1.jar + - io.grpc-grpc-alts-1.45.1.jar + - io.grpc-grpc-api-1.45.1.jar + - io.grpc-grpc-grpclb-1.45.1.jar + - io.grpc-grpc-netty-shaded-1.45.1.jar + - io.grpc-grpc-services-1.45.1.jar + - io.grpc-grpc-xds-1.45.1.jar + - io.grpc-grpc-rls-1.45.1.jar * Perfmark - io.perfmark-perfmark-api-0.19.0.jar * OpenCensus - - io.opencensus-opencensus-api-0.18.0.jar - - io.opencensus-opencensus-contrib-http-util-0.24.0.jar + - io.opencensus-opencensus-api-0.28.0.jar + - io.opencensus-opencensus-contrib-http-util-0.28.0.jar - io.opencensus-opencensus-proto-0.2.0.jar * Jodah - net.jodah-typetools-0.5.0.jar @@ -513,15 +526,20 @@ The Apache Software License, Version 2.0 * Snappy Java - org.xerial.snappy-snappy-java-1.1.7.jar * Google HTTP Client - - com.google.http-client-google-http-client-jackson2-1.34.0.jar - - com.google.http-client-google-http-client-1.34.0.jar - - com.google.auto.value-auto-value-annotations-1.7.jar - - com.google.re2j-re2j-1.2.jar + - com.google.http-client-google-http-client-jackson2-1.41.0.jar + - com.google.http-client-google-http-client-gson-1.41.0.jar + - com.google.http-client-google-http-client-1.41.0.jar + - com.google.auto.value-auto-value-annotations-1.9.jar + - com.google.re2j-re2j-1.5.jar + * IPAddress + - com.github.seancfoley-ipaddress-5.3.3.jar + * RoaringBitmap + - org.roaringbitmap-RoaringBitmap-0.9.15.jar BSD 3-clause "New" or "Revised" License * Google auth library - - com.google.auth-google-auth-library-credentials-0.20.0.jar -- licenses/LICENSE-google-auth-library.txt - - com.google.auth-google-auth-library-oauth2-http-0.20.0.jar -- licenses/LICENSE-google-auth-library.txt + - com.google.auth-google-auth-library-credentials-1.4.0.jar -- licenses/LICENSE-google-auth-library.txt + - com.google.auth-google-auth-library-oauth2-http-1.4.0.jar -- licenses/LICENSE-google-auth-library.txt * LevelDB -- (included in org.rocksdb.*.jar) -- licenses/LICENSE-LevelDB.txt * JSR305 -- 
com.google.code.findbugs-jsr305-3.0.2.jar -- licenses/LICENSE-JSR305.txt * JLine -- jline-jline-2.14.6.jar -- licenses/LICENSE-JLine.txt @@ -532,16 +550,16 @@ BSD 2-Clause License MIT License * Java SemVer -- com.github.zafarkhaja-java-semver-0.9.0.jar -- licenses/LICENSE-SemVer.txt * SLF4J -- licenses/LICENSE-SLF4J.txt - - org.slf4j-jul-to-slf4j-1.7.25.jar - - org.slf4j-slf4j-api-1.7.25.jar - - org.slf4j-jcl-over-slf4j-1.7.25.jar + - org.slf4j-jul-to-slf4j-1.7.32.jar + - org.slf4j-slf4j-api-1.7.32.jar + - org.slf4j-jcl-over-slf4j-1.7.32.jar * The Checker Framework - org.checkerframework-checker-qual-3.5.0.jar Protocol Buffers License * Protocol Buffers - - com.google.protobuf-protobuf-java-3.11.4.jar -- licenses/LICENSE-protobuf.txt - - com.google.protobuf-protobuf-java-util-3.11.4.jar -- licenses/LICENSE-protobuf.txt + - com.google.protobuf-protobuf-java-3.19.6.jar -- licenses/LICENSE-protobuf.txt + - com.google.protobuf-protobuf-java-util-3.19.6.jar -- licenses/LICENSE-protobuf.txt CDDL-1.1 -- licenses/LICENSE-CDDL-1.1.txt * Java Annotations API diff --git a/distribution/server/src/assemble/NOTICE.bin.txt b/distribution/server/src/assemble/NOTICE.bin.txt index 661921cc81684..bc5e2e6d63b0e 100644 --- a/distribution/server/src/assemble/NOTICE.bin.txt +++ b/distribution/server/src/assemble/NOTICE.bin.txt @@ -1,6 +1,6 @@ Apache Pulsar -Copyright 2017-2021 The Apache Software Foundation +Copyright 2017-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
diff --git a/docker/grafana/pom.xml b/docker/grafana/pom.xml index b91947eb1fe84..4437c3fd48406 100644 --- a/docker/grafana/pom.xml +++ b/docker/grafana/pom.xml @@ -23,7 +23,7 @@ org.apache.pulsar docker-images - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 grafana-docker-image diff --git a/docker/pom.xml b/docker/pom.xml index 1d106d77247e2..80db6b2797ab6 100644 --- a/docker/pom.xml +++ b/docker/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 docker-images Apache Pulsar :: Docker Images diff --git a/docker/pulsar-all/pom.xml b/docker/pulsar-all/pom.xml index 11ff264b16abe..be92352bd46bc 100644 --- a/docker/pulsar-all/pom.xml +++ b/docker/pulsar-all/pom.xml @@ -23,7 +23,7 @@ org.apache.pulsar docker-images - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 pulsar-all-docker-image diff --git a/docker/pulsar/Dockerfile b/docker/pulsar/Dockerfile index d30c6c3b681a7..ce7be90e223f1 100644 --- a/docker/pulsar/Dockerfile +++ b/docker/pulsar/Dockerfile @@ -46,7 +46,7 @@ ARG DEBIAN_FRONTEND=noninteractive RUN apt-get update \ && apt-get -y dist-upgrade \ && apt-get -y install openjdk-11-jdk-headless netcat dnsutils less procps iputils-ping \ - python3 python3-dev python3-setuptools python3-yaml python3-kazoo \ + python3 python3-dev python3-setuptools python3-kazoo \ libreadline-gplv2-dev libncursesw5-dev libssl-dev libsqlite3-dev tk-dev libgdbm-dev libc6-dev libbz2-dev \ curl \ && apt-get -y --purge autoremove \ @@ -58,6 +58,7 @@ RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py RUN python3 get-pip.py RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 10 +RUN pip3 install pyyaml==5.4.1 ENV JAVA_HOME /usr/lib/jvm/java-11-openjdk-amd64 RUN echo networkaddress.cache.ttl=1 >> /usr/lib/jvm/java-11-openjdk-amd64/conf/security/java.security diff --git a/docker/pulsar/pom.xml b/docker/pulsar/pom.xml index 06e21e9d46b02..8a64a759375c0 100644 --- a/docker/pulsar/pom.xml +++ b/docker/pulsar/pom.xml @@ -23,7 +23,7 @@ org.apache.pulsar docker-images - 
2.9.0-SNAPSHOT + 2.9.3 4.0.0 pulsar-docker-image @@ -75,7 +75,7 @@ ${project.basedir}/../../pulsar-client-cpp/docker/build-wheels.sh - 3.8 cp38-cp38 + 3.8 cp38-cp38 manylinux2014 x86_64 diff --git a/docker/pulsar/scripts/apply-config-from-env-with-prefix.py b/docker/pulsar/scripts/apply-config-from-env-with-prefix.py index a3a4adafcabfb..3f6bc2e4d3b85 100755 --- a/docker/pulsar/scripts/apply-config-from-env-with-prefix.py +++ b/docker/pulsar/scripts/apply-config-from-env-with-prefix.py @@ -60,7 +60,7 @@ v = os.environ[k].strip() # Hide the value in logs if is password. - if "password" in k: + if "password" in k.lower(): displayValue = "********" else: displayValue = v @@ -80,7 +80,7 @@ continue # Hide the value in logs if is password. - if "password" in k: + if "password" in k.lower(): displayValue = "********" else: displayValue = v diff --git a/docker/pulsar/scripts/apply-config-from-env.py b/docker/pulsar/scripts/apply-config-from-env.py index 6e2eb746cd4e9..b9757d77f5ef5 100755 --- a/docker/pulsar/scripts/apply-config-from-env.py +++ b/docker/pulsar/scripts/apply-config-from-env.py @@ -62,7 +62,7 @@ v = os.environ[k].strip() # Hide the value in logs if is password. - if "password" in k: + if "password" in k.lower(): displayValue = "********" else: displayValue = v @@ -82,7 +82,7 @@ continue # Hide the value in logs if is password. 
- if "password" in k: + if "password" in k.lower(): displayValue = "********" else: displayValue = v diff --git a/docker/pulsar/scripts/gen-yml-from-env.py b/docker/pulsar/scripts/gen-yml-from-env.py index 8aee68b5282db..779341d1269cc 100755 --- a/docker/pulsar/scripts/gen-yml-from-env.py +++ b/docker/pulsar/scripts/gen-yml-from-env.py @@ -47,7 +47,9 @@ 'proxyRoles', 'schemaRegistryCompatibilityCheckers', 'brokerClientTlsCiphers', - 'brokerClientTlsProtocols' + 'brokerClientTlsProtocols', + 'webServiceTlsCiphers', + 'webServiceTlsProtocols', ] PF_ENV_PREFIX = 'PF_' diff --git a/jclouds-shaded/pom.xml b/jclouds-shaded/pom.xml index 1214656f427ff..c690773089f8e 100644 --- a/jclouds-shaded/pom.xml +++ b/jclouds-shaded/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. @@ -39,6 +39,11 @@ jclouds-allblobstore ${jclouds.version} + + org.apache.jclouds.driver + jclouds-slf4j + ${jclouds.version} + javax.annotation javax.annotation-api diff --git a/kafka-connect-avro-converter-shaded/pom.xml b/kafka-connect-avro-converter-shaded/pom.xml index 62814c939879d..7c426485b04e7 100644 --- a/kafka-connect-avro-converter-shaded/pom.xml +++ b/kafka-connect-avro-converter-shaded/pom.xml @@ -26,7 +26,7 @@ pulsar org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/managed-ledger/pom.xml b/managed-ledger/pom.xml index 5bbb0448022cb..0ea22a9fa36ad 100644 --- a/managed-ledger/pom.xml +++ b/managed-ledger/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
@@ -94,15 +94,6 @@ snappy-java test - - - junit - junit - test - org.awaitility @@ -110,12 +101,6 @@ test - - org.apache.logging.log4j - log4j-1.2-api - test - - org.slf4j slf4j-api diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloader.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloader.java index ead9db73965e4..4718801b475c8 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloader.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloader.java @@ -90,6 +90,7 @@ default CompletableFuture AsyncClose() { // TODO: improve the user metadata in subsequent changes String METADATA_SOFTWARE_VERSION_KEY = "S3ManagedLedgerOffloaderSoftwareVersion"; String METADATA_SOFTWARE_GITSHA_KEY = "S3ManagedLedgerOffloaderSoftwareGitSha"; + String METADATA_PULSAR_CLUSTER_NAME = "pulsarClusterName"; /** * Get offload driver name. diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java index 4af64550fe53b..f67cd96ea9f35 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java @@ -425,7 +425,11 @@ void markDelete(Position position, Map properties) * @param newReadPosition * the position where to move the cursor */ - void seek(Position newReadPosition); + default void seek(Position newReadPosition) { + seek(newReadPosition, false); + } + + void seek(Position newReadPosition, boolean force); /** * Clear the cursor backlog. 
@@ -529,10 +533,14 @@ void asyncFindNewestMatching(FindPositionConstraint constraint, Predicate * * @param position * position to move the cursor to + * @param forceReset + * whether to force reset the position even if the position is no longer in the managed ledger, + * this is used by compacted topic which has data in the compacted ledger, to ensure the cursor can + * read data from the compacted ledger. * @param callback * callback object */ - void asyncResetCursor(final Position position, AsyncCallbacks.ResetCursorCallback callback); + void asyncResetCursor(Position position, boolean forceReset, AsyncCallbacks.ResetCursorCallback callback); /** * Read the specified set of positions from ManagedLedger. @@ -706,4 +714,10 @@ Set asyncReplayEntries( * @return if read position changed */ boolean checkAndUpdateReadPositionChanged(); + + /** + * Checks if the cursor is closed. + * @return whether this cursor is closed. + */ + public boolean isClosed(); } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java index 74c67e965afc6..cd39919a3b357 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java @@ -259,7 +259,8 @@ public interface ManagedLedger { */ ManagedCursor newNonDurableCursor(Position startCursorPosition) throws ManagedLedgerException; ManagedCursor newNonDurableCursor(Position startPosition, String subscriptionName) throws ManagedLedgerException; - ManagedCursor newNonDurableCursor(Position startPosition, String subscriptionName, InitialPosition initialPosition) throws ManagedLedgerException; + ManagedCursor newNonDurableCursor(Position startPosition, String subscriptionName, InitialPosition initialPosition, + boolean isReadCompacted) throws ManagedLedgerException; /** * Delete a ManagedCursor asynchronously. 
@@ -287,6 +288,13 @@ public interface ManagedLedger { */ void deleteCursor(String name) throws InterruptedException, ManagedLedgerException; + /** + * Remove a ManagedCursor from this ManagedLedger's waitingCursors. + * + * @param cursor the ManagedCursor + */ + void removeWaitingCursor(ManagedCursor cursor); + /** * Open a ManagedCursor asynchronously. * diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerException.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerException.java index 26cc9e1265925..0dc820ec46d72 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerException.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerException.java @@ -80,6 +80,10 @@ public ManagedLedgerFencedException() { super(new Exception("Attempted to use a fenced managed ledger")); } + public ManagedLedgerFencedException(String message) { + super(message); + } + public ManagedLedgerFencedException(Exception e) { super(e); } @@ -182,6 +186,20 @@ public ManagedLedgerFactoryClosedException(Throwable e) { } } + public static class ConcurrentWaitCallbackException extends ManagedLedgerException { + + public ConcurrentWaitCallbackException() { + super("We can only have a single waiting callback"); + } + } + + public static class OffloadReadHandleClosedException extends ManagedLedgerException { + + public OffloadReadHandleClosedException() { + super("Offload read handle already closed"); + } + } + @Override public synchronized Throwable fillInStackTrace() { // Disable stack traces to be filled in diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactoryConfig.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactoryConfig.java index a00c161641083..25fcb377e3e11 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactoryConfig.java +++ 
b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactoryConfig.java @@ -86,4 +86,9 @@ public class ManagedLedgerFactoryConfig { * ManagedLedgerInfo compression type. If the compression type is null or invalid, don't compress data. */ private String managedLedgerInfoCompressionType = MLDataFormats.CompressionType.NONE.name(); + + /** + * ManagedCursorInfo compression type. If the compression type is null or invalid, don't compress data. + */ + private String managedCursorInfoCompressionType = MLDataFormats.CompressionType.NONE.name(); } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/EntryCacheImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/EntryCacheImpl.java index 6a0ac2c650c49..085923e25ccdf 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/EntryCacheImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/EntryCacheImpl.java @@ -209,14 +209,8 @@ private void asyncReadEntry0(ReadHandle lh, PositionImpl position, final ReadEnt manager.mlFactoryMBean.recordCacheHit(cachedEntry.getLength()); callback.readEntryComplete(cachedEntry, ctx); } else { - lh.readAsync(position.getEntryId(), position.getEntryId()).whenCompleteAsync( - (ledgerEntries, exception) -> { - if (exception != null) { - ml.invalidateLedgerHandle(lh); - callback.readEntryFailed(createManagedLedgerException(exception), ctx); - return; - } - + lh.readAsync(position.getEntryId(), position.getEntryId()).thenAcceptAsync( + ledgerEntries -> { try { Iterator iterator = ledgerEntries.iterator(); if (iterator.hasNext()) { @@ -234,12 +228,11 @@ private void asyncReadEntry0(ReadHandle lh, PositionImpl position, final ReadEnt } finally { ledgerEntries.close(); } - }, ml.getExecutor().chooseThread(ml.getName())).exceptionally(exception->{ - ml.invalidateLedgerHandle(lh); - callback.readEntryFailed(createManagedLedgerException(exception), ctx); - return null; - } - ); + }, 
ml.getExecutor().chooseThread(ml.getName())).exceptionally(exception -> { + ml.invalidateLedgerHandle(lh); + callback.readEntryFailed(createManagedLedgerException(exception), ctx); + return null; + }); } } @@ -297,20 +290,8 @@ private void asyncReadEntry0(ReadHandle lh, long firstEntry, long lastEntry, boo } // Read all the entries from bookkeeper - lh.readAsync(firstEntry, lastEntry).whenCompleteAsync( - (ledgerEntries, exception) -> { - if (exception != null) { - if (exception instanceof BKException - && ((BKException)exception).getCode() == BKException.Code.TooManyRequestsException) { - callback.readEntriesFailed(createManagedLedgerException(exception), ctx); - } else { - ml.invalidateLedgerHandle(lh); - ManagedLedgerException mlException = createManagedLedgerException(exception); - callback.readEntriesFailed(mlException, ctx); - } - return; - } - + lh.readAsync(firstEntry, lastEntry).thenAcceptAsync( + ledgerEntries -> { checkNotNull(ml.getName()); checkNotNull(ml.getExecutor()); @@ -333,17 +314,17 @@ private void asyncReadEntry0(ReadHandle lh, long firstEntry, long lastEntry, boo } finally { ledgerEntries.close(); } - }, ml.getExecutor().chooseThread(ml.getName())).exceptionally(exception->{ - if (exception instanceof BKException - && ((BKException)exception).getCode() == BKException.Code.TooManyRequestsException) { - callback.readEntriesFailed(createManagedLedgerException(exception), ctx); - } else { - ml.invalidateLedgerHandle(lh); - ManagedLedgerException mlException = createManagedLedgerException(exception); - callback.readEntriesFailed(mlException, ctx); - } - return null; - }); + }, ml.getExecutor().chooseThread(ml.getName())).exceptionally(exception -> { + if (exception instanceof BKException + && ((BKException)exception).getCode() == BKException.Code.TooManyRequestsException) { + callback.readEntriesFailed(createManagedLedgerException(exception), ctx); + } else { + ml.invalidateLedgerHandle(lh); + ManagedLedgerException mlException = 
createManagedLedgerException(exception); + callback.readEntriesFailed(mlException, ctx); + } + return null; + }); } } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/EntryCacheManager.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/EntryCacheManager.java index c87bcb8aa4031..1c0c288e8784d 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/EntryCacheManager.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/EntryCacheManager.java @@ -193,12 +193,8 @@ public void invalidateEntriesBeforeTimestamp(long timestamp) { @Override public void asyncReadEntry(ReadHandle lh, long firstEntry, long lastEntry, boolean isSlowestReader, final ReadEntriesCallback callback, Object ctx) { - lh.readAsync(firstEntry, lastEntry).whenComplete( - (ledgerEntries, exception) -> { - if (exception != null) { - callback.readEntriesFailed(createManagedLedgerException(exception), ctx); - return; - } + lh.readAsync(firstEntry, lastEntry).thenAcceptAsync( + ledgerEntries -> { List entries = Lists.newArrayList(); long totalSize = 0; try { @@ -215,10 +211,10 @@ public void asyncReadEntry(ReadHandle lh, long firstEntry, long lastEntry, boole ml.mbean.addReadEntriesSample(entries.size(), totalSize); callback.readEntriesComplete(entries, ctx); - }).exceptionally(exception -> { - callback.readEntriesFailed(createManagedLedgerException(exception), ctx); - return null; - }); + }, ml.getExecutor().chooseThread(ml.getName())).exceptionally(exception -> { + callback.readEntriesFailed(createManagedLedgerException(exception), ctx); + return null; + }); } @Override diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java index 235736da760dc..e31d4eff88730 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java +++ 
b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java @@ -118,6 +118,11 @@ public class ManagedCursorImpl implements ManagedCursor { // this position is have persistent mark delete position protected volatile PositionImpl persistentMarkDeletePosition; + protected static final AtomicReferenceFieldUpdater + INPROGRESS_MARKDELETE_PERSIST_POSITION_UPDATER = + AtomicReferenceFieldUpdater.newUpdater(ManagedCursorImpl.class, PositionImpl.class, + "inProgressMarkDeletePersistPosition"); + protected volatile PositionImpl inProgressMarkDeletePersistPosition; protected static final AtomicReferenceFieldUpdater READ_POSITION_UPDATER = AtomicReferenceFieldUpdater.newUpdater(ManagedCursorImpl.class, PositionImpl.class, "readPosition"); @@ -194,6 +199,7 @@ public class ManagedCursorImpl implements ManagedCursor { private long entriesReadCount; private long entriesReadSize; private int individualDeletedMessagesSerializedSize; + private static final String COMPACTION_CURSOR_NAME = "__compaction"; class MarkDeleteEntry { final PositionImpl newPosition; @@ -213,6 +219,31 @@ public MarkDeleteEntry(PositionImpl newPosition, Map properties, this.callback = callback; this.ctx = ctx; } + + public void triggerComplete() { + // Trigger the final callback after having (eventually) triggered the switchin-ledger operation. This + // will ensure that no race condition will happen between the next mark-delete and the switching + // operation. 
+ if (callbackGroup != null) { + // Trigger the callback for every request in the group + for (MarkDeleteEntry e : callbackGroup) { + e.callback.markDeleteComplete(e.ctx); + } + } else if (callback != null) { + // Only trigger the callback for the current request + callback.markDeleteComplete(ctx); + } + } + + public void triggerFailed(ManagedLedgerException exception) { + if (callbackGroup != null) { + for (MarkDeleteEntry e : callbackGroup) { + e.callback.markDeleteFailed(exception, e.ctx); + } + } else if (callback != null) { + callback.markDeleteFailed(exception, ctx); + } + } } protected final ArrayDeque pendingMarkDeleteOps = new ArrayDeque<>(); @@ -235,7 +266,7 @@ enum State { Closed // The managed cursor has been closed } - private static final AtomicReferenceFieldUpdater STATE_UPDATER = + protected static final AtomicReferenceFieldUpdater STATE_UPDATER = AtomicReferenceFieldUpdater.newUpdater(ManagedCursorImpl.class, State.class, "state"); protected volatile State state = null; @@ -539,6 +570,7 @@ private void recoveredCursor(PositionImpl position, Map properties messagesConsumedCounter = -getNumberOfEntries(Range.openClosed(position, ledger.getLastPosition())); markDeletePosition = position; persistentMarkDeletePosition = position; + inProgressMarkDeletePersistPosition = null; readPosition = ledger.getNextValidPosition(position); lastMarkDeleteEntry = new MarkDeleteEntry(markDeletePosition, properties, null, null); // assign cursor-ledger so, it can be deleted when new ledger will be switched @@ -764,8 +796,8 @@ public void asyncReadEntriesOrWait(int maxEntries, long maxSizeBytes, ReadEntrie ctx, maxPosition); if (!WAITING_READ_OP_UPDATER.compareAndSet(this, null, op)) { - callback.readEntriesFailed(new ManagedLedgerException("We can only have a single waiting callback"), - ctx); + op.recycle(); + callback.readEntriesFailed(new ManagedLedgerException.ConcurrentWaitCallbackException(), ctx); return; } @@ -836,6 +868,7 @@ private void 
checkForNewEntries(OpReadEntry op, ReadEntriesCallback callback, Ob } } + @Override public boolean isClosed() { return state == State.Closed || state == State.Closing; } @@ -845,7 +878,11 @@ public boolean cancelPendingReadRequest() { if (log.isDebugEnabled()) { log.debug("[{}] [{}] Cancel pending read request", ledger.getName(), name); } - return WAITING_READ_OP_UPDATER.getAndSet(this, null) != null; + final OpReadEntry op = WAITING_READ_OP_UPDATER.getAndSet(this, null); + if (op != null) { + op.recycle(); + } + return op != null; } public boolean hasPendingReadRequest() { @@ -931,7 +968,7 @@ public long getNumberOfEntriesInBacklog(boolean isPrecise) { } public long getNumberOfEntriesInStorage() { - return ledger.getNumberOfEntries(Range.openClosed(markDeletePosition, ledger.getLastPosition().getNext())); + return ledger.getNumberOfEntries(Range.openClosed(markDeletePosition, ledger.getLastPosition())); } @Override @@ -1044,6 +1081,7 @@ protected void internalResetCursor(PositionImpl position, AsyncCallbacks.ResetCu resetCursorCallback.resetFailed( new ManagedLedgerException.ConcurrentFindCursorPositionException("reset already in progress"), position); + return; } } @@ -1068,7 +1106,8 @@ public void operationComplete() { Range.closedOpen(markDeletePosition, newMarkDeletePosition))); } markDeletePosition = newMarkDeletePosition; - lastMarkDeleteEntry = new MarkDeleteEntry(newMarkDeletePosition, Collections.emptyMap(), + lastMarkDeleteEntry = new MarkDeleteEntry(newMarkDeletePosition, isCompactionCursor() ? 
+ getProperties() : Collections.emptyMap(), null, null); individualDeletedMessages.clear(); if (config.isDeletionAtBatchIndexLevelEnabled() && batchDeletedIndexes != null) { @@ -1101,7 +1140,7 @@ public void operationComplete() { } } callback.resetComplete(newPosition); - + updateLastActive(); } @Override @@ -1118,7 +1157,11 @@ public void operationFailed(ManagedLedgerException exception) { }; - internalAsyncMarkDelete(newPosition, Collections.emptyMap(), new MarkDeleteCallback() { + persistentMarkDeletePosition = null; + inProgressMarkDeletePersistPosition = null; + lastMarkDeleteEntry = new MarkDeleteEntry(newPosition, getProperties(), null, null); + internalAsyncMarkDelete(newPosition, isCompactionCursor() ? getProperties() : Collections.emptyMap(), + new MarkDeleteCallback() { @Override public void markDeleteComplete(Object ctx) { finalCallback.operationComplete(); @@ -1132,7 +1175,7 @@ public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { } @Override - public void asyncResetCursor(Position newPos, AsyncCallbacks.ResetCursorCallback callback) { + public void asyncResetCursor(Position newPos, boolean forceReset, AsyncCallbacks.ResetCursorCallback callback) { checkArgument(newPos instanceof PositionImpl); final PositionImpl newPosition = (PositionImpl) newPos; @@ -1140,9 +1183,10 @@ public void asyncResetCursor(Position newPos, AsyncCallbacks.ResetCursorCallback ledger.getExecutor().executeOrdered(ledger.getName(), safeRun(() -> { PositionImpl actualPosition = newPosition; - if (!ledger.isValidPosition(actualPosition) && - !actualPosition.equals(PositionImpl.earliest) && - !actualPosition.equals(PositionImpl.latest)) { + if (!ledger.isValidPosition(actualPosition) + && !actualPosition.equals(PositionImpl.earliest) + && !actualPosition.equals(PositionImpl.latest) + && !forceReset) { actualPosition = ledger.getNextValidPosition(actualPosition); if (actualPosition == null) { @@ -1165,7 +1209,7 @@ class Result { final Result result = new 
Result(); final CountDownLatch counter = new CountDownLatch(1); - asyncResetCursor(newPos, new AsyncCallbacks.ResetCursorCallback() { + asyncResetCursor(newPos, false, new AsyncCallbacks.ResetCursorCallback() { @Override public void resetComplete(Object ctx) { counter.countDown(); @@ -1325,25 +1369,32 @@ protected long getNumberOfEntries(Range range) { lock.readLock().lock(); try { - individualDeletedMessages.forEach((r) -> { - try { - if (r.isConnected(range)) { - Range commonEntries = r.intersection(range); - long commonCount = ledger.getNumberOfEntries(commonEntries); - if (log.isDebugEnabled()) { - log.debug("[{}] [{}] Discounting {} entries for already deleted range {}", ledger.getName(), - name, commonCount, commonEntries); + if (config.isUnackedRangesOpenCacheSetEnabled()) { + int cardinality = individualDeletedMessages.cardinality( + range.lowerEndpoint().ledgerId, range.lowerEndpoint().entryId, + range.upperEndpoint().ledgerId, range.upperEndpoint().entryId); + deletedEntries.addAndGet(cardinality); + } else { + individualDeletedMessages.forEach((r) -> { + try { + if (r.isConnected(range)) { + Range commonEntries = r.intersection(range); + long commonCount = ledger.getNumberOfEntries(commonEntries); + if (log.isDebugEnabled()) { + log.debug("[{}] [{}] Discounting {} entries for already deleted range {}", + ledger.getName(), name, commonCount, commonEntries); + } + deletedEntries.addAndGet(commonCount); + } + return true; + } finally { + if (r.lowerEndpoint() instanceof PositionImplRecyclable) { + ((PositionImplRecyclable) r.lowerEndpoint()).recycle(); + ((PositionImplRecyclable) r.upperEndpoint()).recycle(); } - deletedEntries.addAndGet(commonCount); - } - return true; - } finally { - if (r.lowerEndpoint() instanceof PositionImplRecyclable) { - ((PositionImplRecyclable) r.lowerEndpoint()).recycle(); - ((PositionImplRecyclable) r.upperEndpoint()).recycle(); } - } - }, recyclePositionRangeConverter); + }, recyclePositionRangeConverter); + } } finally { 
lock.readLock().unlock(); } @@ -1563,6 +1614,9 @@ boolean hasMoreEntries(PositionImpl position) { void initializeCursorPosition(Pair lastPositionCounter) { readPosition = ledger.getNextValidPosition(lastPositionCounter.getLeft()); markDeletePosition = lastPositionCounter.getLeft(); + lastMarkDeleteEntry = new MarkDeleteEntry(markDeletePosition, getProperties(), null, null); + persistentMarkDeletePosition = null; + inProgressMarkDeletePersistPosition = null; // Initialize the counter such that the difference between the messages written on the ML and the // messagesConsumed is 0, to ensure the initial backlog count is 0. @@ -1577,7 +1631,9 @@ void initializeCursorPosition(Pair lastPositionCounter) { */ PositionImpl setAcknowledgedPosition(PositionImpl newMarkDeletePosition) { if (newMarkDeletePosition.compareTo(markDeletePosition) < 0) { - throw new IllegalArgumentException("Mark deleting an already mark-deleted position"); + throw new IllegalArgumentException( + "Mark deleting an already mark-deleted position. 
Current mark-delete: " + markDeletePosition + + " -- attempted mark delete: " + newMarkDeletePosition); } PositionImpl oldMarkDeletePosition = markDeletePosition; @@ -1693,7 +1749,8 @@ public void asyncMarkDelete(final Position position, Map propertie try { long ledgerEntries = ledger.getLedgerInfo(markDeletePosition.getLedgerId()).get().getEntries(); Long nextValidLedger = ledger.getNextValidLedger(ledger.getLastConfirmedEntry().getLedgerId()); - shouldCursorMoveForward = (markDeletePosition.getEntryId() + 1 >= ledgerEntries) + shouldCursorMoveForward = nextValidLedger != null + && (markDeletePosition.getEntryId() + 1 >= ledgerEntries) && (newPosition.getLedgerId() == nextValidLedger); } catch (Exception e) { log.warn("Failed to get ledger entries while setting mark-delete-position", e); @@ -1726,7 +1783,7 @@ public void asyncMarkDelete(final Position position, Map propertie // Apply rate limiting to mark-delete operations if (markDeleteLimiter != null && !markDeleteLimiter.tryAcquire()) { isDirty = true; - lastMarkDeleteEntry = new MarkDeleteEntry(newPosition, properties, null, null); + updateLastMarkDeleteEntryToLatest(newPosition, properties); callback.markDeleteComplete(ctx); return; } @@ -1749,8 +1806,10 @@ protected void internalAsyncMarkDelete(final PositionImpl newPosition, Map mdEntry.triggerComplete())); + return; + } + + PositionImpl inProgressLatest = INPROGRESS_MARKDELETE_PERSIST_POSITION_UPDATER.updateAndGet(this, current -> { + if (current != null && current.compareTo(mdEntry.newPosition) > 0) { + return current; + } else { + return mdEntry.newPosition; + } + }); + + // if there's a newer or equal mark delete update in progress, skip it. + if (inProgressLatest != mdEntry.newPosition) { + if (log.isInfoEnabled()) { + log.info("Skipping updating mark delete position to {}. 
The mark delete position update " + + "in progress {} is later.", mdEntry.newPosition, inProgressLatest); + } + // run with executor to prevent deadlock + ledger.getExecutor().executeOrdered(ledger.getName(), safeRun(() -> mdEntry.triggerComplete())); + return; + } + // The counter is used to mark all the pending mark-delete request that were submitted to BK and that are not // yet finished. While we have outstanding requests we cannot close the current ledger, so the switch to new // ledger is postponed to when the counter goes to 0. PENDING_MARK_DELETED_SUBMITTED_COUNT_UPDATER.incrementAndGet(this); - lastMarkDeleteEntry = mdEntry; + LAST_MARK_DELETE_ENTRY_UPDATER.updateAndGet(this, last -> { + if (last != null && last.newPosition.compareTo(mdEntry.newPosition) > 0) { + // keep the current value since it's later then the mdEntry.newPosition + return last; + } else { + return mdEntry; + } + }); persistPositionToLedger(cursorLedger, mdEntry, new VoidCallback() { @Override @@ -1790,6 +1886,9 @@ public void operationComplete() { mdEntry.newPosition); } + INPROGRESS_MARKDELETE_PERSIST_POSITION_UPDATER.compareAndSet(ManagedCursorImpl.this, + mdEntry.newPosition, null); + // Remove from the individual deleted messages all the entries before the new mark delete // point. lock.writeLock().lock(); @@ -1801,11 +1900,7 @@ public void operationComplete() { subMap.values().forEach(BitSetRecyclable::recycle); subMap.clear(); } - if (persistentMarkDeletePosition == null - || mdEntry.newPosition.compareTo(persistentMarkDeletePosition) > 0) { - persistentMarkDeletePosition = mdEntry.newPosition; - } - + persistentMarkDeletePosition = mdEntry.newPosition; } finally { lock.writeLock().unlock(); } @@ -1814,22 +1909,14 @@ public void operationComplete() { decrementPendingMarkDeleteCount(); - // Trigger the final callback after having (eventually) triggered the switchin-ledger operation. 
This - // will ensure that no race condition will happen between the next mark-delete and the switching - // operation. - if (mdEntry.callbackGroup != null) { - // Trigger the callback for every request in the group - for (MarkDeleteEntry e : mdEntry.callbackGroup) { - e.callback.markDeleteComplete(e.ctx); - } - } else { - // Only trigger the callback for the current request - mdEntry.callback.markDeleteComplete(mdEntry.ctx); - } + mdEntry.triggerComplete(); } @Override public void operationFailed(ManagedLedgerException exception) { + INPROGRESS_MARKDELETE_PERSIST_POSITION_UPDATER.compareAndSet(ManagedCursorImpl.this, + mdEntry.newPosition, null); + isDirty = true; log.warn("[{}] Failed to mark delete position for cursor={} position={}", ledger.getName(), ManagedCursorImpl.this, mdEntry.newPosition); if (log.isDebugEnabled()) { @@ -1839,13 +1926,7 @@ public void operationFailed(ManagedLedgerException exception) { decrementPendingMarkDeleteCount(); - if (mdEntry.callbackGroup != null) { - for (MarkDeleteEntry e : mdEntry.callbackGroup) { - e.callback.markDeleteFailed(exception, e.ctx); - } - } else { - mdEntry.callback.markDeleteFailed(exception, mdEntry.ctx); - } + mdEntry.triggerFailed(exception); } }); } @@ -1990,7 +2071,6 @@ public void asyncDelete(Iterable positions, AsyncCallbacks.DeleteCallb if (individualDeletedMessages.isEmpty()) { // No changes to individually deleted messages, so nothing to do at this point - callback.deleteComplete(ctx); return; } @@ -1998,6 +2078,19 @@ public void asyncDelete(Iterable positions, AsyncCallbacks.DeleteCallb // mark-delete to the upper bound of the first range segment Range range = individualDeletedMessages.firstRange(); + // If the upper bound is before the mark-delete position, we need to move ahead as these + // individualDeletedMessages are now irrelevant + if (range.upperEndpoint().compareTo(markDeletePosition) <= 0) { + individualDeletedMessages.removeAtMost(markDeletePosition.getLedgerId(), + 
markDeletePosition.getEntryId()); + range = individualDeletedMessages.firstRange(); + } + + if (range == null) { + // The set was completely cleaned up now + return; + } + // If the lowerBound is ahead of MarkDelete, verify if there are any entries in-between if (range.lowerEndpoint().compareTo(markDeletePosition) <= 0 || ledger .getNumberOfEntries(Range.openClosed(markDeletePosition, range.lowerEndpoint())) <= 0) { @@ -2022,14 +2115,15 @@ public void asyncDelete(Iterable positions, AsyncCallbacks.DeleteCallb return; } finally { lock.writeLock().unlock(); + if (individualDeletedMessages.isEmpty()) { + callback.deleteComplete(ctx); + } } // Apply rate limiting to mark-delete operations if (markDeleteLimiter != null && !markDeleteLimiter.tryAcquire()) { isDirty = true; - PositionImpl finalNewMarkDeletePosition = newMarkDeletePosition; - LAST_MARK_DELETE_ENTRY_UPDATER.updateAndGet(this, - last -> new MarkDeleteEntry(finalNewMarkDeletePosition, last.properties, null, null)); + updateLastMarkDeleteEntryToLatest(newMarkDeletePosition, null); callback.deleteComplete(ctx); return; } @@ -2061,6 +2155,22 @@ public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { } } + // update lastMarkDeleteEntry field if newPosition is later than the current lastMarkDeleteEntry.newPosition + private void updateLastMarkDeleteEntryToLatest(final PositionImpl newPosition, + final Map properties) { + LAST_MARK_DELETE_ENTRY_UPDATER.updateAndGet(this, last -> { + if (last != null && last.newPosition.compareTo(newPosition) > 0) { + // keep current value, don't update + return last; + } else { + // use given properties or when missing, use the properties from the previous field value + Map propertiesToUse = + properties != null ? properties : (last != null ? 
last.properties : Collections.emptyMap()); + return new MarkDeleteEntry(newPosition, propertiesToUse, null, null); + } + }); + } + /** * Given a list of entries, filter out the entries that have already been individually deleted. * @@ -2163,18 +2273,16 @@ public void rewind() { } @Override - public void seek(Position newReadPositionInt) { + public void seek(Position newReadPositionInt, boolean force) { checkArgument(newReadPositionInt instanceof PositionImpl); PositionImpl newReadPosition = (PositionImpl) newReadPositionInt; lock.writeLock().lock(); try { - if (newReadPosition.compareTo(markDeletePosition) <= 0) { + if (!force && newReadPosition.compareTo(markDeletePosition) <= 0) { // Make sure the newReadPosition comes after the mark delete position newReadPosition = ledger.getNextValidPosition(markDeletePosition); } - - PositionImpl oldReadPosition = readPosition; readPosition = newReadPosition; } finally { lock.writeLock().unlock(); @@ -2347,8 +2455,22 @@ public void asyncClose(final AsyncCallbacks.CloseCallback callback, final Object callback.closeComplete(ctx); return; } - persistPositionWhenClosing(lastMarkDeleteEntry.newPosition, lastMarkDeleteEntry.properties, callback, ctx); - STATE_UPDATER.set(this, State.Closed); + persistPositionWhenClosing(lastMarkDeleteEntry.newPosition, lastMarkDeleteEntry.properties, + new AsyncCallbacks.CloseCallback(){ + + @Override + public void closeComplete(Object ctx) { + STATE_UPDATER.set(ManagedCursorImpl.this, State.Closed); + callback.closeComplete(ctx); + } + + @Override + public void closeFailed(ManagedLedgerException exception, Object ctx) { + log.warn("[{}] [{}] persistent position failure when closing, the state will remain in" + + " state-closing and will no longer work", ledger.getName(), name); + callback.closeFailed(exception, ctx); + } + }, ctx); } /** @@ -2983,8 +3105,14 @@ public Range getLastIndividualDeletedRange() { @Override public void trimDeletedEntries(List entries) { - entries.removeIf(entry -> 
((PositionImpl) entry.getPosition()).compareTo(markDeletePosition) <= 0 - || individualDeletedMessages.contains(entry.getLedgerId(), entry.getEntryId())); + entries.removeIf(entry -> { + boolean isDeleted = ((PositionImpl) entry.getPosition()).compareTo(markDeletePosition) <= 0 + || individualDeletedMessages.contains(entry.getLedgerId(), entry.getEntryId()); + if (isDeleted) { + entry.release(); + } + return isDeleted; + }); } private ManagedCursorImpl cursorImpl() { @@ -3068,5 +3196,9 @@ public boolean checkAndUpdateReadPositionChanged() { return isReadPositionOnTail || isReadPositionChanged; } + private boolean isCompactionCursor() { + return COMPACTION_CURSOR_NAME.equals(name); + } + private static final Logger log = LoggerFactory.getLogger(ManagedCursorImpl.class); } diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java index 94dee65fafd59..e3d24f5522211 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java @@ -20,6 +20,7 @@ import static com.google.common.base.Preconditions.checkArgument; import static org.apache.bookkeeper.mledger.ManagedLedgerException.getManagedLedgerException; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.base.Predicates; import com.google.common.collect.Maps; import io.netty.util.concurrent.DefaultThreadFactory; @@ -183,13 +184,14 @@ private ManagedLedgerFactoryImpl(MetadataStoreExtended metadataStore, this.bookkeeperFactory = bookKeeperGroupFactory; this.isBookkeeperManaged = isBookkeeperManaged; this.metadataStore = metadataStore; - this.store = new MetaStoreImpl(metadataStore, scheduledExecutor, config.getManagedLedgerInfoCompressionType()); + this.store = new 
MetaStoreImpl(metadataStore, scheduledExecutor, config.getManagedLedgerInfoCompressionType(), + config.getManagedCursorInfoCompressionType()); this.config = config; this.mbean = new ManagedLedgerFactoryMBeanImpl(this); this.entryCacheManager = new EntryCacheManager(this); - this.statsTask = scheduledExecutor.scheduleAtFixedRate(this::refreshStats, + this.statsTask = scheduledExecutor.scheduleWithFixedDelay(catchingAndLoggingThrowables(this::refreshStats), 0, config.getStatsPeriodSeconds(), TimeUnit.SECONDS); - this.flushCursorsTask = scheduledExecutor.scheduleAtFixedRate(this::flushCursors, + this.flushCursorsTask = scheduledExecutor.scheduleAtFixedRate(catchingAndLoggingThrowables(this::flushCursors), config.getCursorPositionFlushSeconds(), config.getCursorPositionFlushSeconds(), TimeUnit.SECONDS); diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java index 0bc88c561170c..752f2fb960be0 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java @@ -152,8 +152,11 @@ public class ManagedLedgerImpl implements ManagedLedger, CreateCallback { protected Map propertiesMap; protected final MetaStore store; - final ConcurrentLongHashMap> ledgerCache = new ConcurrentLongHashMap<>( - 16 /* initial capacity */, 1 /* number of sections */); + final ConcurrentLongHashMap> ledgerCache = + ConcurrentLongHashMap.>newBuilder() + .expectedItems(16) // initial capacity + .concurrencyLevel(1) // number of sections + .build(); protected final NavigableMap ledgers = new ConcurrentSkipListMap<>(); private volatile Stat ledgersStat; @@ -399,6 +402,7 @@ public void operationComplete(ManagedLedgerInfo mlInfo, Stat stat) { @Override public void operationFailed(MetaStoreException e) { + handleBadVersion(e); if (e instanceof 
MetadataNotFoundException) { callback.initializeFailed(new ManagedLedgerNotFoundException(e)); } else { @@ -449,6 +453,7 @@ public void operationComplete(Void v, Stat stat) { @Override public void operationFailed(MetaStoreException e) { + handleBadVersion(e); callback.initializeFailed(new ManagedLedgerException(e)); } }; @@ -699,10 +704,14 @@ public void asyncAddEntry(ByteBuf buffer, AddEntryCallback callback, Object ctx) log.debug("[{}] asyncAddEntry size={} state={}", name, buffer.readableBytes(), state); } - OpAddEntry addOperation = OpAddEntry.create(this, buffer, callback, ctx); + // retain buffer in this thread + buffer.retain(); // Jump to specific thread to avoid contention from writers writing from different threads - executor.executeOrdered(name, safeRun(() -> internalAsyncAddEntry(addOperation))); + executor.executeOrdered(name, safeRun(() -> { + OpAddEntry addOperation = OpAddEntry.createNoRetainBuffer(this, buffer, callback, ctx); + internalAsyncAddEntry(addOperation); + })); } @Override @@ -711,17 +720,20 @@ public void asyncAddEntry(ByteBuf buffer, int numberOfMessages, AddEntryCallback log.debug("[{}] asyncAddEntry size={} state={}", name, buffer.readableBytes(), state); } - OpAddEntry addOperation = OpAddEntry.create(this, buffer, numberOfMessages, callback, ctx); + // retain buffer in this thread + buffer.retain(); // Jump to specific thread to avoid contention from writers writing from different threads - executor.executeOrdered(name, safeRun(() -> internalAsyncAddEntry(addOperation))); + executor.executeOrdered(name, safeRun(() -> { + OpAddEntry addOperation = OpAddEntry.createNoRetainBuffer(this, buffer, numberOfMessages, callback, ctx); + internalAsyncAddEntry(addOperation); + })); } private synchronized void internalAsyncAddEntry(OpAddEntry addOperation) { if (!beforeAddEntry(addOperation)) { return; } - pendingAddEntries.add(addOperation); final State state = STATE_UPDATER.get(this); if (state == State.Fenced) { addOperation.failed(new 
ManagedLedgerFencedException()); @@ -733,10 +745,10 @@ private synchronized void internalAsyncAddEntry(OpAddEntry addOperation) { addOperation.failed(new ManagedLedgerAlreadyClosedException("Managed ledger was already closed")); return; } else if (state == State.WriteFailed) { - pendingAddEntries.remove(addOperation); addOperation.failed(new ManagedLedgerAlreadyClosedException("Waiting to recover from failure")); return; } + pendingAddEntries.add(addOperation); if (state == State.ClosingLedger || state == State.CreatingLedger) { // We don't have a ready ledger to write into @@ -755,8 +767,8 @@ private synchronized void internalAsyncAddEntry(OpAddEntry addOperation) { } } else if (state == State.ClosedLedger) { // No ledger and no pending operations. Create a new ledger - log.info("[{}] Creating a new ledger", name); if (STATE_UPDATER.compareAndSet(this, State.ClosedLedger, State.CreatingLedger)) { + log.info("[{}] Creating a new ledger", name); this.lastLedgerCreationInitiationTimestamp = System.currentTimeMillis(); mbean.startDataLedgerCreateOp(); asyncCreateLedger(bookKeeper, config, digestType, this, Collections.emptyMap()); @@ -976,6 +988,7 @@ public void operationComplete(Void result, Stat stat) { @Override public void operationFailed(MetaStoreException e) { + handleBadVersion(e); callback.deleteCursorFailed(e, ctx); } @@ -1023,11 +1036,12 @@ public ManagedCursor newNonDurableCursor(Position startCursorPosition) throws Ma @Override public ManagedCursor newNonDurableCursor(Position startPosition, String subscriptionName) throws ManagedLedgerException { - return newNonDurableCursor(startPosition, subscriptionName, InitialPosition.Latest); + return newNonDurableCursor(startPosition, subscriptionName, InitialPosition.Latest, false); } @Override - public ManagedCursor newNonDurableCursor(Position startCursorPosition, String cursorName, InitialPosition initialPosition) + public ManagedCursor newNonDurableCursor(Position startCursorPosition, String cursorName, 
InitialPosition initialPosition, + boolean isReadCompacted) throws ManagedLedgerException { Objects.requireNonNull(cursorName, "cursor name can't be null"); checkManagedLedgerIsOpen(); @@ -1042,7 +1056,7 @@ public ManagedCursor newNonDurableCursor(Position startCursorPosition, String cu } NonDurableCursorImpl cursor = new NonDurableCursorImpl(bookKeeper, config, this, cursorName, - (PositionImpl) startCursorPosition, initialPosition); + (PositionImpl) startCursorPosition, initialPosition, isReadCompacted); cursor.setActive(); log.info("[{}] Opened new cursor: {}", name, cursor); @@ -1225,6 +1239,7 @@ public void operationComplete(Void result, Stat stat) { @Override public void operationFailed(MetaStoreException e) { log.error("[{}] Failed to terminate managed ledger: {}", name, e.getMessage()); + handleBadVersion(e); callback.terminateFailed(new ManagedLedgerException(e), ctx); } }); @@ -1309,6 +1324,7 @@ public void closeFailed(ManagedLedgerException exception, Object ctx) { public synchronized void asyncClose(final CloseCallback callback, final Object ctx) { State state = STATE_UPDATER.get(this); if (state == State.Fenced) { + cancelScheduledTasks(); factory.close(this); callback.closeFailed(new ManagedLedgerFencedException(), ctx); return; @@ -1324,14 +1340,7 @@ public synchronized void asyncClose(final CloseCallback callback, final Object c factory.close(this); STATE_UPDATER.set(this, State.Closed); - - if (this.timeoutTask != null) { - this.timeoutTask.cancel(false); - } - - if (this.checkLedgerRollTask != null) { - this.checkLedgerRollTask.cancel(false); - } + cancelScheduledTasks(); LedgerHandle lh = currentLedger; @@ -1413,11 +1422,7 @@ public synchronized void createComplete(int rc, final LedgerHandle lh, Object ct lastLedgerCreationFailureTimestamp = clock.millis(); } else { log.info("[{}] Created new ledger {}", name, lh.getId()); - ledgers.put(lh.getId(), LedgerInfo.newBuilder().setLedgerId(lh.getId()).setTimestamp(0).build()); - currentLedger = lh; - 
currentLedgerEntries = 0; - currentLedgerSize = 0; - + LedgerInfo newLedger = LedgerInfo.newBuilder().setLedgerId(lh.getId()).setTimestamp(0).build(); final MetaStoreCallback cb = new MetaStoreCallback() { @Override public void operationComplete(Void v, Stat stat) { @@ -1425,6 +1430,10 @@ public void operationComplete(Void v, Stat stat) { log.debug("[{}] Updating of ledgers list after create complete. version={}", name, stat); } ledgersStat = stat; + ledgers.put(lh.getId(), newLedger); + currentLedger = lh; + currentLedgerEntries = 0; + currentLedgerSize = 0; metadataMutex.unlock(); updateLedgersIdsComplete(stat); synchronized (ManagedLedgerImpl.this) { @@ -1438,12 +1447,22 @@ public void operationComplete(Void v, Stat stat) { @Override public void operationFailed(MetaStoreException e) { + log.warn("[{}] Error updating meta data with the new list of ledgers: {}", name, e.getMessage()); + handleBadVersion(e); + mbean.startDataLedgerDeleteOp(); + bookKeeper.asyncDeleteLedger(lh.getId(), (rc1, ctx1) -> { + mbean.endDataLedgerDeleteOp(); + if (rc1 != BKException.Code.OK) { + log.warn("[{}] Failed to delete ledger {}: {}", name, lh.getId(), + BKException.getMessage(rc1)); + } + }, null); if (e instanceof BadVersionException) { synchronized (ManagedLedgerImpl.this) { log.error( - "[{}] Failed to update ledger list. z-node version mismatch. Closing managed ledger", - name); - STATE_UPDATER.set(ManagedLedgerImpl.this, State.Fenced); + "[{}] Failed to update ledger list. z-node version mismatch. 
Closing managed ledger", + name); + lastLedgerCreationFailureTimestamp = clock.millis(); // Return ManagedLedgerFencedException to addFailed callback // to indicate that the ledger is now fenced and topic needs to be closed clearPendingAddEntries(new ManagedLedgerFencedException(e)); @@ -1452,19 +1471,6 @@ public void operationFailed(MetaStoreException e) { } } - log.warn("[{}] Error updating meta data with the new list of ledgers: {}", name, e.getMessage()); - - // Remove the ledger, since we failed to update the list - ledgers.remove(lh.getId()); - mbean.startDataLedgerDeleteOp(); - bookKeeper.asyncDeleteLedger(lh.getId(), (rc1, ctx1) -> { - mbean.endDataLedgerDeleteOp(); - if (rc1 != BKException.Code.OK) { - log.warn("[{}] Failed to delete ledger {}: {}", name, lh.getId(), - BKException.getMessage(rc1)); - } - }, null); - metadataMutex.unlock(); synchronized (ManagedLedgerImpl.this) { @@ -1475,21 +1481,28 @@ public void operationFailed(MetaStoreException e) { } }; - updateLedgersListAfterRollover(cb); + updateLedgersListAfterRollover(cb, newLedger); } } - private void updateLedgersListAfterRollover(MetaStoreCallback callback) { + private void handleBadVersion(Throwable e) { + if (e instanceof BadVersionException) { + setFenced(); + } + } + private void updateLedgersListAfterRollover(MetaStoreCallback callback, LedgerInfo newLedger) { if (!metadataMutex.tryLock()) { // Defer update for later - scheduledExecutor.schedule(() -> updateLedgersListAfterRollover(callback), 100, TimeUnit.MILLISECONDS); + scheduledExecutor.schedule(() -> updateLedgersListAfterRollover(callback, newLedger), + 100, TimeUnit.MILLISECONDS); return; } if (log.isDebugEnabled()) { log.debug("[{}] Updating ledgers ids with new ledger. 
version={}", name, ledgersStat); } - store.asyncUpdateLedgerIds(name, getManagedLedgerInfo(), ledgersStat, callback); + ManagedLedgerInfo mlInfo = getManagedLedgerInfo(newLedger); + store.asyncUpdateLedgerIds(name, mlInfo, ledgersStat, callback); } public synchronized void updateLedgersIdsComplete(Stat stat) { @@ -1509,9 +1522,7 @@ public synchronized void updateLedgersIdsComplete(Stat stat) { // If op is used by another ledger handle, we need to close it and create a new one if (existsOp.ledger != null) { existsOp.close(); - existsOp = OpAddEntry.create(existsOp.ml, existsOp.data, existsOp.getNumberOfMessages(), existsOp.callback, existsOp.ctx); - // release the extra retain - ReferenceCountUtil.release(existsOp.data); + existsOp = OpAddEntry.createNoRetainBuffer(existsOp.ml, existsOp.data, existsOp.getNumberOfMessages(), existsOp.callback, existsOp.ctx); } existsOp.setLedger(currentLedger); pendingAddEntries.add(existsOp); @@ -1589,12 +1600,15 @@ synchronized void ledgerClosed(final LedgerHandle lh) { } synchronized void createLedgerAfterClosed() { - if(isNeededCreateNewLedgerAfterCloseLedger()) { - log.info("[{}] Creating a new ledger", name); + if (isNeededCreateNewLedgerAfterCloseLedger()) { + log.info("[{}] Creating a new ledger after closed", name); STATE_UPDATER.set(this, State.CreatingLedger); this.lastLedgerCreationInitiationTimestamp = System.currentTimeMillis(); mbean.startDataLedgerCreateOp(); - asyncCreateLedger(bookKeeper, config, digestType, this, Collections.emptyMap()); + // Use the executor here is to avoid use the Zookeeper thread to create the ledger which will lead + // to deadlock at the zookeeper client, details to see https://github.com/apache/pulsar/issues/13736 + this.executor.execute(() -> + asyncCreateLedger(bookKeeper, config, digestType, this, Collections.emptyMap())); } } @@ -1610,8 +1624,8 @@ boolean isNeededCreateNewLedgerAfterCloseLedger() { @Override public void rollCurrentLedgerIfFull() { log.info("[{}] Start checking if current 
ledger is full", name); - if (currentLedgerEntries > 0 && currentLedgerIsFull()) { - STATE_UPDATER.set(this, State.ClosingLedger); + if (currentLedgerEntries > 0 && currentLedgerIsFull() + && STATE_UPDATER.compareAndSet(this, State.LedgerOpened, State.ClosingLedger)) { currentLedger.asyncClose(new AsyncCallback.CloseCallback() { @Override public void closeComplete(int rc, LedgerHandle lh, Object o) { @@ -1680,7 +1694,6 @@ public ManagedLedgerInterceptor getManagedLedgerInterceptor() { void clearPendingAddEntries(ManagedLedgerException e) { while (!pendingAddEntries.isEmpty()) { OpAddEntry op = pendingAddEntries.poll(); - op.close(); op.failed(e); } } @@ -1723,7 +1736,12 @@ void asyncReadEntries(OpReadEntry opReadEntry) { } public CompletableFuture getLedgerMetadata(long ledgerId) { - return getLedgerHandle(ledgerId).thenApply(rh -> rh.getLedgerMetadata().toSafeString()); + LedgerHandle currentLedger = this.currentLedger; + if (currentLedger != null && ledgerId == currentLedger.getId()) { + return CompletableFuture.completedFuture(currentLedger.getLedgerMetadata().toSafeString()); + } else { + return getLedgerHandle(ledgerId).thenApply(rh -> rh.getLedgerMetadata().toSafeString()); + } } @Override @@ -1828,21 +1846,19 @@ public void asyncReadEntry(PositionImpl position, ReadEntryCallback callback, Ob if (log.isDebugEnabled()) { log.debug("[{}] Reading entry ledger {}: {}", name, position.getLedgerId(), position.getEntryId()); } - if (!ledgers.containsKey(position.getLedgerId())) { - log.error("[{}] Failed to get message with ledger {}:{} the ledgerId does not belong to this topic " - + "or has been deleted.", name, position.getLedgerId(), position.getEntryId()); - callback.readEntryFailed(new ManagedLedgerException.NonRecoverableLedgerException("Message not found, " - + "the ledgerId does not belong to this topic or has been deleted"), ctx); - return; - } if (position.getLedgerId() == currentLedger.getId()) { asyncReadEntry(currentLedger, position, callback, ctx); - 
} else { + } else if (ledgers.containsKey(position.getLedgerId())) { getLedgerHandle(position.getLedgerId()).thenAccept(ledger -> asyncReadEntry(ledger, position, callback, ctx)).exceptionally(ex -> { log.error("[{}] Error opening ledger for reading at position {} - {}", name, position, ex.getMessage()); callback.readEntryFailed(ManagedLedgerException.getManagedLedgerException(ex.getCause()), ctx); return null; }); + } else { + log.error("[{}] Failed to get message with ledger {}:{} the ledgerId does not belong to this topic " + + "or has been deleted.", name, position.getLedgerId(), position.getEntryId()); + callback.readEntryFailed(new ManagedLedgerException.NonRecoverableLedgerException("Message not found, " + + "the ledgerId does not belong to this topic or has been deleted"), ctx); } } @@ -2151,14 +2167,9 @@ void updateCursor(ManagedCursorImpl cursor, PositionImpl newPosition) { } } - PositionImpl startReadOperationOnLedger(PositionImpl position, OpReadEntry opReadEntry) { + PositionImpl startReadOperationOnLedger(PositionImpl position) { Long ledgerId = ledgers.ceilingKey(position.getLedgerId()); - if (null == ledgerId) { - opReadEntry.readEntriesFailed(new ManagedLedgerException.NoMoreEntriesToReadException("The ceilingKey(K key) method is used to return the " + - "least key greater than or equal to the given key, or null if there is no such key"), null); - } - - if (ledgerId != position.getLedgerId()) { + if (ledgerId != null && ledgerId != position.getLedgerId()) { // The ledger pointed by this position does not exist anymore. It was deleted because it was empty. 
We need // to skip on the next available ledger position = new PositionImpl(ledgerId, 0); @@ -2299,13 +2310,13 @@ private void maybeOffload(CompletableFuture finalPromise) { + ", total size = {}, already offloaded = {}, to offload = {}", name, toOffload.stream().map(LedgerInfo::getLedgerId).collect(Collectors.toList()), sizeSummed, alreadyOffloadedSize, toOffloadSize); + offloadLoop(unlockingPromise, toOffload, PositionImpl.latest, Optional.empty()); } else { // offloadLoop will complete immediately with an empty list to offload log.debug("[{}] Nothing to offload, total size = {}, already offloaded = {}, threshold = {}", name, sizeSummed, alreadyOffloadedSize, threshold); + unlockingPromise.complete(PositionImpl.latest); } - - offloadLoop(unlockingPromise, toOffload, PositionImpl.latest, Optional.empty()); } } } @@ -2361,12 +2372,19 @@ void internalTrimLedgers(boolean isTruncate, CompletableFuture promise) { log.debug("[{}] Start TrimConsumedLedgers. ledgers={} totalSize={}", name, ledgers.keySet(), TOTAL_SIZE_UPDATER.get(this)); } - if (STATE_UPDATER.get(this) == State.Closed) { + State currentState = STATE_UPDATER.get(this); + if (currentState == State.Closed) { log.debug("[{}] Ignoring trimming request since the managed ledger was already closed", name); trimmerMutex.unlock(); promise.completeExceptionally(new ManagedLedgerAlreadyClosedException("Can't trim closed ledger")); return; } + if (currentState == State.Fenced) { + log.debug("[{}] Ignoring trimming request since the managed ledger was already fenced", name); + trimmerMutex.unlock(); + promise.completeExceptionally(new ManagedLedgerFencedException("Can't trim fenced ledger")); + return; + } long slowestReaderLedgerId = -1; if (!cursors.hasDurableCursors()) { @@ -2402,7 +2420,7 @@ void internalTrimLedgers(boolean isTruncate, CompletableFuture promise) { break; } // if truncate, all ledgers besides currentLedger are going to be deleted - if (isTruncate){ + if (isTruncate) { if (log.isDebugEnabled()) { 
log.debug("[{}] Ledger {} will be truncated with ts {}", name, ls.getLedgerId(), ls.getTimestamp()); @@ -2430,11 +2448,14 @@ void internalTrimLedgers(boolean isTruncate, CompletableFuture promise) { } ledgersToDelete.add(ls); } else { - // once retention constraint has been met, skip check - if (log.isDebugEnabled()) { - log.debug("[{}] Ledger {} not deleted. Neither expired nor over-quota", name, ls.getLedgerId()); + if (ls.getLedgerId() < getTheSlowestNonDurationReadPosition().getLedgerId()) { + // once retention constraint has been met, skip check + if (log.isDebugEnabled()) { + log.debug("[{}] Ledger {} not deleted. Neither expired nor over-quota", name, + ls.getLedgerId()); + } + invalidateReadHandle(ls.getLedgerId()); } - invalidateReadHandle(ls.getLedgerId()); } } @@ -2453,7 +2474,7 @@ void internalTrimLedgers(boolean isTruncate, CompletableFuture promise) { return; } - if (STATE_UPDATER.get(this) == State.CreatingLedger // Give up now and schedule a new trimming + if (currentState == State.CreatingLedger // Give up now and schedule a new trimming || !metadataMutex.tryLock()) { // Avoid deadlocks with other operations updating the ledgers list scheduleDeferredTrimming(isTruncate, promise); trimmerMutex.unlock(); @@ -2520,6 +2541,7 @@ public void operationFailed(MetaStoreException e) { log.warn("[{}] Failed to update the list of ledgers after trimming", name, e); metadataMutex.unlock(); trimmerMutex.unlock(); + handleBadVersion(e); promise.completeExceptionally(e); } @@ -2551,8 +2573,9 @@ private void advanceCursorsIfNecessary(List ledgersToDelete) { // move the mark delete position to the highestPositionToDelete only if it is smaller than the add confirmed // to prevent the edge case where the cursor is caught up to the latest and highestPositionToDelete may be larger than the last add confirmed if (highestPositionToDelete.compareTo((PositionImpl) cursor.getMarkDeletedPosition()) > 0 - && highestPositionToDelete.compareTo((PositionImpl) 
cursor.getManagedLedger().getLastConfirmedEntry()) <= 0 ) { - cursor.asyncMarkDelete(highestPositionToDelete, new MarkDeleteCallback() { + && highestPositionToDelete.compareTo((PositionImpl) cursor.getManagedLedger().getLastConfirmedEntry()) <= 0 + && !(!cursor.isDurable() && cursor instanceof NonDurableCursorImpl && ((NonDurableCursorImpl) cursor).isReadCompacted())) { + cursor.asyncMarkDelete(highestPositionToDelete, cursor.getProperties(), new MarkDeleteCallback() { @Override public void markDeleteComplete(Object ctx) { } @@ -2605,7 +2628,8 @@ public void deleteLedgerFailed(ManagedLedgerException e, Object ctx) { public void asyncDelete(final DeleteLedgerCallback callback, final Object ctx) { // Delete the managed ledger without closing, since we are not interested in gracefully closing cursors and // ledgers - STATE_UPDATER.set(this, State.Fenced); + setFenced(); + cancelScheduledTasks(); List cursors = Lists.newArrayList(this.cursors); if (cursors.isEmpty()) { @@ -2778,6 +2802,10 @@ public void offloadFailed(ManagedLedgerException e, Object ctx) { @Override public void asyncOffloadPrefix(Position pos, OffloadCallback callback, Object ctx) { + if (config.getLedgerOffloader() != null && config.getLedgerOffloader() == NullLedgerOffloader.INSTANCE) { + callback.offloadFailed(new ManagedLedgerException("NullLedgerOffloader"), ctx); + return; + } PositionImpl requestOffloadTo = (PositionImpl) pos; if (!isValidPosition(requestOffloadTo) && // Also consider the case where the last ledger is currently @@ -2847,7 +2875,7 @@ public void asyncOffloadPrefix(Position pos, OffloadCallback callback, Object ct promise.whenComplete((result, exception) -> { offloadMutex.unlock(); if (exception != null) { - callback.offloadFailed(new ManagedLedgerException(exception), ctx); + callback.offloadFailed(ManagedLedgerException.getManagedLedgerException(exception), ctx); } else { callback.offloadComplete(result, ctx); } @@ -2861,6 +2889,17 @@ public void asyncOffloadPrefix(Position pos, 
OffloadCallback callback, Object ct private void offloadLoop(CompletableFuture promise, Queue ledgersToOffload, PositionImpl firstUnoffloaded, Optional firstError) { + State currentState = getState(); + if (currentState == State.Closed) { + promise.completeExceptionally(new ManagedLedgerAlreadyClosedException( + String.format("managed ledger [%s] has already closed", name))); + return; + } + if (currentState == State.Fenced) { + promise.completeExceptionally(new ManagedLedgerFencedException( + String.format("managed ledger [%s] is fenced", name))); + return; + } LedgerInfo info = ledgersToOffload.poll(); if (info == null) { if (firstError.isPresent()) { @@ -2887,8 +2926,21 @@ private void offloadLoop(CompletableFuture promise, Queue { if (exception != null) { - log.error("[{}] Failed to offload data for the ledgerId {}", + Throwable e = FutureUtil.unwrapCompletionException(exception); + if (e instanceof MetaStoreException) { + // When a MetaStore exception happens, we can not make sure the metadata + // update is failed or not. Because we have a retry on the connection loss, + // it is possible to get a BadVersion or other exception after retrying. + // So we don't clean up the data if it has metadata operation exception. 
+ log.error("[{}] Failed to update offloaded metadata for the ledgerId {}, " + + "the offloaded data will not be cleaned up", + name, ledgerId, exception); + return; + } else { + log.error("[{}] Failed to offload data for the ledgerId {}, " + + "clean up the offloaded data", name, ledgerId, exception); + } cleanupOffloaded( ledgerId, uuid, driverName, driverMetadata, @@ -2987,6 +3039,7 @@ public void operationComplete(Void result, Stat stat) { @Override public void operationFailed(MetaStoreException e) { + handleBadVersion(e); unlockingPromise.completeExceptionally(e); } }); @@ -3157,20 +3210,16 @@ public PositionImpl getPositionAfterN(final PositionImpl startPosition, long n, long entriesToSkip = n; long currentLedgerId; long currentEntryId; - if (startRange == PositionBound.startIncluded) { currentLedgerId = startPosition.getLedgerId(); currentEntryId = startPosition.getEntryId(); } else { - // e.g. a mark-delete position PositionImpl nextValidPosition = getNextValidPosition(startPosition); currentLedgerId = nextValidPosition.getLedgerId(); currentEntryId = nextValidPosition.getEntryId(); } - boolean lastLedger = false; long totalEntriesInCurrentLedger; - while (entriesToSkip >= 0) { // for the current ledger, the number of entries written is deduced from the lastConfirmedEntry // for previous ledgers, LedgerInfo in ZK has the number of entries @@ -3182,11 +3231,11 @@ public PositionImpl getPositionAfterN(final PositionImpl startPosition, long n, totalEntriesInCurrentLedger = 0; } } else { - totalEntriesInCurrentLedger = ledgers.get(currentLedgerId).getEntries(); + LedgerInfo ledgerInfo = ledgers.get(currentLedgerId); + totalEntriesInCurrentLedger = ledgerInfo != null ? ledgerInfo.getEntries() : 0; } - - long unreadEntriesInCurrentLedger = totalEntriesInCurrentLedger - currentEntryId; - + long unreadEntriesInCurrentLedger = totalEntriesInCurrentLedger > 0 + ? 
totalEntriesInCurrentLedger - currentEntryId : 0; if (unreadEntriesInCurrentLedger >= entriesToSkip) { // if the current ledger has more entries than what we need to skip // then the return position is in the same ledger @@ -3199,11 +3248,10 @@ public PositionImpl getPositionAfterN(final PositionImpl startPosition, long n, // there are no more ledgers, return the last position currentEntryId = totalEntriesInCurrentLedger; break; - } else { - Long lid = ledgers.ceilingKey(currentLedgerId + 1); - currentLedgerId = lid != null ? lid : (ledgers.lastKey() + 1); - currentEntryId = 0; } + Long lid = ledgers.ceilingKey(currentLedgerId + 1); + currentLedgerId = lid != null ? lid : ledgers.lastKey(); + currentEntryId = 0; } } @@ -3414,6 +3462,10 @@ public void deactivateCursor(ManagedCursor cursor) { } } + public void removeWaitingCursor(ManagedCursor cursor) { + this.waitingCursors.remove(cursor); + } + public boolean isCursorActive(ManagedCursor cursor) { return activeCursors.get(cursor.getName()) != null; } @@ -3467,8 +3519,17 @@ private ManagedLedgerInfo getManagedLedgerInfo() { return buildManagedLedgerInfo(ledgers); } + private ManagedLedgerInfo getManagedLedgerInfo(LedgerInfo newLedger) { + ManagedLedgerInfo.Builder mlInfo = ManagedLedgerInfo.newBuilder().addAllLedgerInfo(ledgers.values()) + .addLedgerInfo(newLedger); + return buildManagedLedgerInfo(mlInfo); + } private ManagedLedgerInfo buildManagedLedgerInfo(Map ledgers) { ManagedLedgerInfo.Builder mlInfo = ManagedLedgerInfo.newBuilder().addAllLedgerInfo(ledgers.values()); + return buildManagedLedgerInfo(mlInfo); + } + + private ManagedLedgerInfo buildManagedLedgerInfo(ManagedLedgerInfo.Builder mlInfo) { if (state == State.Terminated) { mlInfo.setTerminatedPosition(NestedPositionInfo.newBuilder().setLedgerId(lastConfirmedEntry.getLedgerId()) .setEntryId(lastConfirmedEntry.getEntryId())); @@ -3503,6 +3564,7 @@ private void checkManagedLedgerIsOpen() throws ManagedLedgerException { } synchronized void setFenced() { + 
log.info("{} Moving to Fenced state", name); STATE_UPDATER.set(this, State.Fenced); } @@ -3711,12 +3773,21 @@ private void scheduleTimeoutTask() { ? Math.max(config.getAddEntryTimeoutSeconds(), config.getReadEntryTimeoutSeconds()) : timeoutSec; this.timeoutTask = this.scheduledExecutor.scheduleAtFixedRate(safeRun(() -> { - checkAddTimeout(); - checkReadTimeout(); + checkTimeouts(); }), timeoutSec, timeoutSec, TimeUnit.SECONDS); } } + private void checkTimeouts() { + final State state = STATE_UPDATER.get(this); + if (state == State.Closed + || state == State.Fenced) { + return; + } + checkAddTimeout(); + checkReadTimeout(); + } + private void checkAddTimeout() { long timeoutSec = config.getAddEntryTimeoutSeconds(); if (timeoutSec < 1) { @@ -3724,12 +3795,13 @@ private void checkAddTimeout() { } OpAddEntry opAddEntry = pendingAddEntries.peek(); if (opAddEntry != null) { + final long finalAddOpCount = opAddEntry.addOpCount; boolean isTimedOut = opAddEntry.lastInitTime != -1 && TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - opAddEntry.lastInitTime) >= timeoutSec; if (isTimedOut) { log.error("Failed to add entry for ledger {} in time-out {} sec", (opAddEntry.ledger != null ? 
opAddEntry.ledger.getId() : -1), timeoutSec); - opAddEntry.handleAddTimeoutFailure(opAddEntry.ledger, opAddEntry.addOpCount); + opAddEntry.handleAddTimeoutFailure(opAddEntry.ledger, finalAddOpCount); } } } @@ -3871,6 +3943,7 @@ public void operationComplete(Void result, Stat version) { @Override public void operationFailed(MetaStoreException e) { log.error("[{}] Update managedLedger's properties failed", name, e); + handleBadVersion(e); callback.updatePropertiesFailed(e, ctx); metadataMutex.unlock(); } @@ -3983,7 +4056,15 @@ public CompletableFuture> getEnsemblesAsync(long ledgerId) { return CompletableFuture.completedFuture(Collections.emptySet()); } - return getLedgerHandle(ledgerId).thenCompose(lh -> { + CompletableFuture ledgerHandleFuture; + LedgerHandle currentLedger = this.currentLedger; + if (currentLedger != null && ledgerId == currentLedger.getId()) { + ledgerHandleFuture = CompletableFuture.completedFuture(currentLedger); + } else { + ledgerHandleFuture = getLedgerHandle(ledgerId); + } + + return ledgerHandleFuture.thenCompose(lh -> { Set ensembles = new HashSet<>(); lh.getLedgerMetadata().getAllEnsembles().values().forEach(ensembles::addAll); return CompletableFuture.completedFuture(ensembles); @@ -4003,4 +4084,26 @@ private void updateLastLedgerCreatedTimeAndScheduleRolloverTask() { } } + private void cancelScheduledTasks() { + if (this.timeoutTask != null) { + this.timeoutTask.cancel(false); + } + + if (this.checkLedgerRollTask != null) { + this.checkLedgerRollTask.cancel(false); + } + } + + public Position getTheSlowestNonDurationReadPosition() { + PositionImpl theSlowestNonDurableReadPosition = PositionImpl.latest; + for (ManagedCursor cursor : cursors) { + if (cursor instanceof NonDurableCursorImpl) { + PositionImpl readPosition = (PositionImpl) cursor.getReadPosition(); + if (readPosition.compareTo(theSlowestNonDurableReadPosition) < 0) { + theSlowestNonDurableReadPosition = readPosition; + } + } + } + return theSlowestNonDurableReadPosition; + } 
} diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java index 01a88c157bb0c..4185c5f81b80b 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java @@ -63,6 +63,10 @@ public ManagedLedgerMBeanImpl(ManagedLedgerImpl managedLedger) { public void refreshStats(long period, TimeUnit unit) { double seconds = unit.toMillis(period) / 1000.0; + if (seconds <= 0.0) { + // skip refreshing stats + return; + } addEntryOps.calculateRate(seconds); addEntryWithReplicasOps.calculateRate(seconds); addEntryOpsFailed.calculateRate(seconds); diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerOfflineBacklog.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerOfflineBacklog.java index e00dd47a73974..d258c1ca339ca 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerOfflineBacklog.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerOfflineBacklog.java @@ -220,7 +220,8 @@ private void calculateCursorBacklogs(final ManagedLedgerFactoryImpl factory, fin BookKeeper bk = factory.getBookKeeper(); final CountDownLatch allCursorsCounter = new CountDownLatch(1); final long errorInReadingCursor = -1; - ConcurrentOpenHashMap ledgerRetryMap = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap ledgerRetryMap = + ConcurrentOpenHashMap.newBuilder().build(); final MLDataFormats.ManagedLedgerInfo.LedgerInfo ledgerInfo = ledgers.lastEntry().getValue(); final PositionImpl lastLedgerPosition = new PositionImpl(ledgerInfo.getLedgerId(), ledgerInfo.getEntries() - 1); diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java 
b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java index 5ad62b228bce0..e1c72d2fc0dcb 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java @@ -41,6 +41,7 @@ import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedCursorInfo; import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo; import org.apache.bookkeeper.util.SafeRunnable; +import org.apache.commons.lang.StringUtils; import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.compression.CompressionCodec; import org.apache.pulsar.common.compression.CompressionCodecProvider; @@ -57,30 +58,39 @@ public class MetaStoreImpl implements MetaStore { private final MetadataStore store; private final OrderedExecutor executor; - private static final int MAGIC_MANAGED_LEDGER_INFO_METADATA = 0x4778; // 0100 0111 0111 1000 - private final CompressionType compressionType; + private static final int MAGIC_MANAGED_INFO_METADATA = 0x4778; // 0100 0111 0111 1000 + private final CompressionType ledgerInfoCompressionType; + private final CompressionType cursorInfoCompressionType; public MetaStoreImpl(MetadataStore store, OrderedExecutor executor) { this.store = store; this.executor = executor; - this.compressionType = CompressionType.NONE; + this.ledgerInfoCompressionType = CompressionType.NONE; + this.cursorInfoCompressionType = CompressionType.NONE; } - public MetaStoreImpl(MetadataStore store, OrderedExecutor executor, String compressionType) { + public MetaStoreImpl(MetadataStore store, OrderedExecutor executor, String ledgerInfoCompressionType, + String cursorInfoCompressionType) { this.store = store; this.executor = executor; - CompressionType finalCompressionType; - if (compressionType != null) { - try { - finalCompressionType = CompressionType.valueOf(compressionType); - } catch (Exception e) { - 
log.error("Failed to get compression type {} error msg: {}.", compressionType, e.getMessage()); - throw e; - } - } else { - finalCompressionType = CompressionType.NONE; + this.ledgerInfoCompressionType = parseCompressionType(ledgerInfoCompressionType); + this.cursorInfoCompressionType = parseCompressionType(cursorInfoCompressionType); + } + + private CompressionType parseCompressionType(String value) { + if (StringUtils.isEmpty(value)) { + return CompressionType.NONE; + } + + CompressionType compressionType; + try { + compressionType = CompressionType.valueOf(value); + } catch (Exception e) { + log.error("Failed to get compression type {} error msg: {}.", value, e.getMessage()); + throw e; } - this.compressionType = finalCompressionType; + + return compressionType; } @Override @@ -174,7 +184,7 @@ public void asyncGetCursorInfo(String ledgerName, String cursorName, .thenAcceptAsync(optRes -> { if (optRes.isPresent()) { try { - ManagedCursorInfo info = ManagedCursorInfo.parseFrom(optRes.get().getValue()); + ManagedCursorInfo info = parseManagedCursorInfo(optRes.get().getValue()); callback.operationComplete(info, optRes.get().getStat()); } catch (InvalidProtocolBufferException e) { callback.operationFailed(getException(e)); @@ -196,7 +206,7 @@ public void asyncUpdateCursorInfo(String ledgerName, String cursorName, ManagedC info.getCursorsLedgerId(), info.getMarkDeleteLedgerId(), info.getMarkDeleteEntryId()); String path = PREFIX + ledgerName + "/" + cursorName; - byte[] content = info.toByteArray(); // Binary format + byte[] content = compressCursorInfo(info); long expectedVersion; @@ -306,32 +316,97 @@ private static MetaStoreException getException(Throwable t) { } } + public byte[] compressLedgerInfo(ManagedLedgerInfo managedLedgerInfo) { + if (ledgerInfoCompressionType.equals(CompressionType.NONE)) { + return managedLedgerInfo.toByteArray(); + } + MLDataFormats.ManagedLedgerInfoMetadata mlInfoMetadata = MLDataFormats.ManagedLedgerInfoMetadata + .newBuilder() + 
.setCompressionType(ledgerInfoCompressionType) + .setUncompressedSize(managedLedgerInfo.getSerializedSize()) + .build(); + return compressManagedInfo(managedLedgerInfo.toByteArray(), mlInfoMetadata.toByteArray(), + mlInfoMetadata.getSerializedSize(), ledgerInfoCompressionType); + } + + public byte[] compressCursorInfo(ManagedCursorInfo managedCursorInfo) { + if (cursorInfoCompressionType.equals(CompressionType.NONE)) { + return managedCursorInfo.toByteArray(); + } + MLDataFormats.ManagedCursorInfoMetadata metadata = MLDataFormats.ManagedCursorInfoMetadata + .newBuilder() + .setCompressionType(cursorInfoCompressionType) + .setUncompressedSize(managedCursorInfo.getSerializedSize()) + .build(); + return compressManagedInfo(managedCursorInfo.toByteArray(), metadata.toByteArray(), + metadata.getSerializedSize(), cursorInfoCompressionType); + } + + public ManagedLedgerInfo parseManagedLedgerInfo(byte[] data) throws InvalidProtocolBufferException { + ByteBuf byteBuf = Unpooled.wrappedBuffer(data); + + byte[] metadataBytes = extractCompressMetadataBytes(byteBuf); + if (metadataBytes != null) { + try { + MLDataFormats.ManagedLedgerInfoMetadata metadata = + MLDataFormats.ManagedLedgerInfoMetadata.parseFrom(metadataBytes); + return ManagedLedgerInfo.parseFrom(getCompressionCodec(metadata.getCompressionType()) + .decode(byteBuf, metadata.getUncompressedSize()).nioBuffer()); + } catch (Exception e) { + log.error("Failed to parse managedLedgerInfo metadata, " + + "fall back to parse managedLedgerInfo directly.", e); + return ManagedLedgerInfo.parseFrom(data); + } finally { + byteBuf.release(); + } + } else { + return ManagedLedgerInfo.parseFrom(data); + } + } + + public ManagedCursorInfo parseManagedCursorInfo(byte[] data) throws InvalidProtocolBufferException { + ByteBuf byteBuf = Unpooled.wrappedBuffer(data); + + byte[] metadataBytes = extractCompressMetadataBytes(byteBuf); + if (metadataBytes != null) { + try { + MLDataFormats.ManagedCursorInfoMetadata metadata = + 
MLDataFormats.ManagedCursorInfoMetadata.parseFrom(metadataBytes); + return ManagedCursorInfo.parseFrom(getCompressionCodec(metadata.getCompressionType()) + .decode(byteBuf, metadata.getUncompressedSize()).nioBuffer()); + } catch (Exception e) { + log.error("Failed to parse ManagedCursorInfo metadata, " + + "fall back to parse ManagedCursorInfo directly", e); + return ManagedCursorInfo.parseFrom(data); + } finally { + byteBuf.release(); + } + } else { + return ManagedCursorInfo.parseFrom(data); + } + } + /** - * Compress ManagedLedgerInfo data. + * Compress Managed Info data such as LedgerInfo, CursorInfo. * * compression data structure * [MAGIC_NUMBER](2) + [METADATA_SIZE](4) + [METADATA_PAYLOAD] + [MANAGED_LEDGER_INFO_PAYLOAD] - */ - public byte[] compressLedgerInfo(ManagedLedgerInfo managedLedgerInfo) { + */ + private byte[] compressManagedInfo(byte[] info, byte[] metadata, int metadataSerializedSize, + MLDataFormats.CompressionType compressionType) { if (compressionType == null || compressionType.equals(CompressionType.NONE)) { - return managedLedgerInfo.toByteArray(); + return info; } ByteBuf metadataByteBuf = null; ByteBuf encodeByteBuf = null; try { - MLDataFormats.ManagedLedgerInfoMetadata mlInfoMetadata = MLDataFormats.ManagedLedgerInfoMetadata - .newBuilder() - .setCompressionType(compressionType) - .setUncompressedSize(managedLedgerInfo.getSerializedSize()) - .build(); - metadataByteBuf = PulsarByteBufAllocator.DEFAULT.buffer( - mlInfoMetadata.getSerializedSize() + 6, mlInfoMetadata.getSerializedSize() + 6); - metadataByteBuf.writeShort(MAGIC_MANAGED_LEDGER_INFO_METADATA); - metadataByteBuf.writeInt(mlInfoMetadata.getSerializedSize()); - metadataByteBuf.writeBytes(mlInfoMetadata.toByteArray()); - + metadataByteBuf = PulsarByteBufAllocator.DEFAULT.buffer(metadataSerializedSize + 6, + metadataSerializedSize + 6); + metadataByteBuf.writeShort(MAGIC_MANAGED_INFO_METADATA); + metadataByteBuf.writeInt(metadataSerializedSize); + 
metadataByteBuf.writeBytes(metadata); encodeByteBuf = getCompressionCodec(compressionType) - .encode(Unpooled.wrappedBuffer(managedLedgerInfo.toByteArray())); + .encode(Unpooled.wrappedBuffer(info)); CompositeByteBuf compositeByteBuf = PulsarByteBufAllocator.DEFAULT.compositeBuffer(); compositeByteBuf.addComponent(true, metadataByteBuf); compositeByteBuf.addComponent(true, encodeByteBuf); @@ -348,42 +423,14 @@ public byte[] compressLedgerInfo(ManagedLedgerInfo managedLedgerInfo) { } } - public ManagedLedgerInfo parseManagedLedgerInfo(byte[] data) throws InvalidProtocolBufferException { - ByteBuf byteBuf = Unpooled.wrappedBuffer(data); - if (byteBuf.readableBytes() > 0 && byteBuf.readShort() == MAGIC_MANAGED_LEDGER_INFO_METADATA) { - ByteBuf decodeByteBuf = null; - try { - int metadataSize = byteBuf.readInt(); - byte[] metadataBytes = new byte[metadataSize]; - byteBuf.readBytes(metadataBytes); - MLDataFormats.ManagedLedgerInfoMetadata metadata = - MLDataFormats.ManagedLedgerInfoMetadata.parseFrom(metadataBytes); - - long unpressedSize = metadata.getUncompressedSize(); - decodeByteBuf = getCompressionCodec(metadata.getCompressionType()) - .decode(byteBuf, (int) unpressedSize); - byte[] decodeBytes; - // couldn't decode data by ZLIB compression byteBuf array() directly - if (decodeByteBuf.hasArray() && !CompressionType.ZLIB.equals(metadata.getCompressionType())) { - decodeBytes = decodeByteBuf.array(); - } else { - decodeBytes = new byte[decodeByteBuf.readableBytes() - decodeByteBuf.readerIndex()]; - decodeByteBuf.readBytes(decodeBytes); - } - return ManagedLedgerInfo.parseFrom(decodeBytes); - } catch (Exception e) { - log.error("Failed to parse managedLedgerInfo metadata, " - + "fall back to parse managedLedgerInfo directly.", e); - return ManagedLedgerInfo.parseFrom(data); - } finally { - if (decodeByteBuf != null) { - decodeByteBuf.release(); - } - byteBuf.release(); - } - } else { - return ManagedLedgerInfo.parseFrom(data); + private byte[] 
extractCompressMetadataBytes(ByteBuf data) { + if (data.readableBytes() > 0 && data.readShort() == MAGIC_MANAGED_INFO_METADATA) { + int metadataSize = data.readInt(); + byte[] metadataBytes = new byte[metadataSize]; + data.readBytes(metadataBytes); + return metadataBytes; } + return null; } private CompressionCodec getCompressionCodec(CompressionType compressionType) { diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java index 15b1f047dab3f..4625f5b58006a 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorImpl.java @@ -33,9 +33,12 @@ public class NonDurableCursorImpl extends ManagedCursorImpl { + private final boolean readCompacted; + NonDurableCursorImpl(BookKeeper bookkeeper, ManagedLedgerConfig config, ManagedLedgerImpl ledger, String cursorName, - PositionImpl startCursorPosition, CommandSubscribe.InitialPosition initialPosition) { + PositionImpl startCursorPosition, CommandSubscribe.InitialPosition initialPosition, boolean isReadCompacted) { super(bookkeeper, config, ledger, cursorName); + this.readCompacted = isReadCompacted; // Compare with "latest" position marker by using only the ledger id. Since the C++ client is using 48bits to // store the entryId, it's not able to pass a Long.max() as entryId. 
In this case there's no point to require @@ -58,14 +61,14 @@ public class NonDurableCursorImpl extends ManagedCursorImpl { // read-position recoverCursor(startCursorPosition); } - + STATE_UPDATER.set(this, State.Open); log.info("[{}] Created non-durable cursor read-position={} mark-delete-position={}", ledger.getName(), readPosition, markDeletePosition); } private void recoverCursor(PositionImpl mdPosition) { Pair lastEntryAndCounter = ledger.getLastPositionAndCounter(); - this.readPosition = ledger.getNextValidPosition(mdPosition); + this.readPosition = isReadCompacted() ? mdPosition.getNext() : ledger.getNextValidPosition(mdPosition); markDeletePosition = mdPosition; // Initialize the counter such that the difference between the messages written on the ML and the @@ -107,7 +110,7 @@ protected void internalAsyncMarkDelete(final PositionImpl newPosition, Map 0) { - // Schedule next read in a different thread + // We still have more entries to read from the next ledger, schedule a new async operation cursor.ledger.getExecutor().execute(safeRun(() -> { - readPosition = cursor.ledger.startReadOperationOnLedger(nextReadPosition, OpReadEntry.this); + readPosition = cursor.ledger.startReadOperationOnLedger(nextReadPosition); cursor.ledger.asyncReadEntries(OpReadEntry.this); })); } else { diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/RangeCache.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/RangeCache.java index a5786ad867034..4a77ac91dcaf0 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/RangeCache.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/RangeCache.java @@ -28,6 +28,7 @@ import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicLong; +import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.commons.lang3.tuple.Pair; /** @@ -74,12 +75,13 @@ public 
RangeCache(Weighter weighter, TimestampExtractor timestampE * @return whether the entry was inserted in the cache */ public boolean put(Key key, Value value) { - if (entries.putIfAbsent(key, value) == null) { + MutableBoolean flag = new MutableBoolean(); + entries.computeIfAbsent(key, (k) -> { size.addAndGet(weighter.getSize(value)); - return true; - } else { - return false; - } + flag.setValue(true); + return value; + }); + return flag.booleanValue(); } public Value get(Key key) { diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/StatsBuckets.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/StatsBuckets.java index 6d08bf498f837..dd779885391dd 100644 --- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/StatsBuckets.java +++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/StatsBuckets.java @@ -110,7 +110,7 @@ public void addAll(StatsBuckets other) { buckets[i].add(other.values[i]); } - sumCounter.add(other.count); + sumCounter.add(other.sum); } private boolean isSorted(long[] array) { diff --git a/managed-ledger/src/main/proto/MLDataFormats.proto b/managed-ledger/src/main/proto/MLDataFormats.proto index a3528b664e29f..4671816c1a199 100644 --- a/managed-ledger/src/main/proto/MLDataFormats.proto +++ b/managed-ledger/src/main/proto/MLDataFormats.proto @@ -137,3 +137,8 @@ message ManagedLedgerInfoMetadata { required CompressionType compressionType = 1; required int32 uncompressedSize = 2; } + +message ManagedCursorInfoMetadata { + required CompressionType compressionType = 1; + required int32 uncompressedSize = 2; +} diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainerTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainerTest.java index 6b9c0094a127e..56de80308e5d1 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainerTest.java +++ 
b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorContainerTest.java @@ -175,7 +175,7 @@ public void rewind() { } @Override - public void seek(Position newReadPosition) { + public void seek(Position newReadPosition, boolean force) { } @Override @@ -237,7 +237,8 @@ public void asyncFindNewestMatching(FindPositionConstraint constraint, Predicate } @Override - public void asyncResetCursor(final Position position, AsyncCallbacks.ResetCursorCallback callback) { + public void asyncResetCursor(final Position position, boolean forceReset, + AsyncCallbacks.ResetCursorCallback callback) { } @@ -385,6 +386,11 @@ public List readEntriesOrWait(int maxEntries, long maxSizeBytes) public boolean checkAndUpdateReadPositionChanged() { return false; } + + @Override + public boolean isClosed() { + return false; + } } @Test diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorInfoMetadataTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorInfoMetadataTest.java new file mode 100644 index 0000000000000..8b95876d0ae3c --- /dev/null +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorInfoMetadataTest.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.bookkeeper.mledger.impl; + +import static org.junit.Assert.assertEquals; +import static org.testng.Assert.expectThrows; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.mledger.proto.MLDataFormats; +import org.apache.pulsar.common.api.proto.CompressionType; +import org.testng.Assert; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +/** + * ManagedCursorInfo metadata test. + */ +@Slf4j +public class ManagedCursorInfoMetadataTest { + private final String INVALID_TYPE = "INVALID_TYPE"; + + @DataProvider(name = "compressionTypeProvider") + private Object[][] compressionTypeProvider() { + return new Object[][]{ + {null}, + {INVALID_TYPE}, + {CompressionType.NONE.name()}, + {CompressionType.LZ4.name()}, + {CompressionType.ZLIB.name()}, + {CompressionType.ZSTD.name()}, + {CompressionType.SNAPPY.name()} + }; + } + + @Test(dataProvider = "compressionTypeProvider") + public void testEncodeAndDecode(String compressionType) throws IOException { + long ledgerId = 10000; + MLDataFormats.ManagedCursorInfo.Builder builder = MLDataFormats.ManagedCursorInfo.newBuilder(); + + builder.setCursorsLedgerId(ledgerId); + builder.setMarkDeleteLedgerId(ledgerId); + + List batchedEntryDeletionIndexInfos = new ArrayList<>(); + for (int i = 0; i < 1000; i++) { + MLDataFormats.NestedPositionInfo nestedPositionInfo = MLDataFormats.NestedPositionInfo.newBuilder() + .setEntryId(i).setLedgerId(i).build(); + MLDataFormats.BatchedEntryDeletionIndexInfo batchedEntryDeletionIndexInfo = MLDataFormats + .BatchedEntryDeletionIndexInfo.newBuilder().setPosition(nestedPositionInfo).build(); + batchedEntryDeletionIndexInfos.add(batchedEntryDeletionIndexInfo); + } + builder.addAllBatchedEntryDeletionIndexInfo(batchedEntryDeletionIndexInfos); + + 
MetaStoreImpl metaStore; + if (INVALID_TYPE.equals(compressionType)) { + IllegalArgumentException compressionTypeEx = expectThrows(IllegalArgumentException.class, () -> { + new MetaStoreImpl(null, null, null, compressionType); + }); + assertEquals("No enum constant org.apache.bookkeeper.mledger.proto.MLDataFormats.CompressionType." + + compressionType, compressionTypeEx.getMessage()); + return; + } else { + metaStore = new MetaStoreImpl(null, null, null, compressionType); + } + + MLDataFormats.ManagedCursorInfo managedCursorInfo = builder.build(); + byte[] compressionBytes = metaStore.compressCursorInfo(managedCursorInfo); + log.info("[{}] Uncompressed data size: {}, compressed data size: {}", + compressionType, managedCursorInfo.getSerializedSize(), compressionBytes.length); + if (compressionType == null || compressionType.equals(CompressionType.NONE.name())) { + Assert.assertEquals(compressionBytes.length, managedCursorInfo.getSerializedSize()); + } + + // parse compression data and unCompression data, check their results. 
+ MLDataFormats.ManagedCursorInfo info1 = metaStore.parseManagedCursorInfo(compressionBytes); + MLDataFormats.ManagedCursorInfo info2 = metaStore.parseManagedCursorInfo(managedCursorInfo.toByteArray()); + Assert.assertEquals(info1, info2); + } +} diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java index 4bfcf944c437e..4c469c1989358 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java @@ -18,6 +18,7 @@ */ package org.apache.bookkeeper.mledger.impl; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.eq; @@ -26,10 +27,10 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - import com.google.common.base.Charsets; import com.google.common.collect.Lists; import com.google.common.collect.Range; @@ -42,10 +43,12 @@ import java.util.BitSet; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; @@ -57,8 +60,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import 
java.util.function.Supplier; import java.util.stream.Collectors; - import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import lombok.Cleanup; @@ -66,6 +69,7 @@ import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.BookKeeper.DigestType; import org.apache.bookkeeper.client.LedgerEntry; +import org.apache.bookkeeper.client.LedgerHandle; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.AsyncCallbacks.AddEntryCallback; import org.apache.bookkeeper.mledger.AsyncCallbacks.DeleteCallback; @@ -87,6 +91,7 @@ import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedCursorInfo; import org.apache.bookkeeper.mledger.proto.MLDataFormats.PositionInfo; import org.apache.bookkeeper.test.MockedBookKeeperTestCase; +import org.apache.pulsar.common.util.collections.LongPairRangeSet; import org.apache.pulsar.metadata.api.extended.SessionEvent; import org.apache.pulsar.metadata.impl.FaultInjectionMetadataStore; import org.apache.pulsar.metadata.api.MetadataStoreException; @@ -95,14 +100,29 @@ import org.awaitility.Awaitility; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.reflect.Whitebox; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.Assert; +import org.testng.IObjectFactory; import org.testng.annotations.DataProvider; +import org.testng.annotations.ObjectFactory; import org.testng.annotations.Test; +@PrepareForTest({ + OpReadEntry.class +}) +@PowerMockIgnore({"org.apache.logging.log4j.*"}) public class ManagedCursorTest extends MockedBookKeeperTestCase { + @ObjectFactory + public IObjectFactory getObjectFactory() { + return new org.powermock.modules.testng.PowerMockObjectFactory(); + } + private static final Charset 
Encoding = Charsets.UTF_8; @DataProvider(name = "useOpenRangeSet") @@ -111,6 +131,39 @@ public static Object[][] useOpenRangeSet() { } + @Test + public void testCloseCursor() throws Exception { + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setMaxUnackedRangesToPersistInZk(0); + config.setThrottleMarkDelete(0); + ManagedLedger ledger = factory.open("my_test_ledger", config); + ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor("c1"); + // Write some data. + ledger.addEntry(new byte[]{1}); + ledger.addEntry(new byte[]{2}); + ledger.addEntry(new byte[]{3}); + ledger.addEntry(new byte[]{4}); + ledger.addEntry(new byte[]{5}); + // Persistent cursor info to ledger. + c1.delete(PositionImpl.get(c1.getReadPosition().getLedgerId(), c1.getReadPosition().getEntryId())); + Awaitility.await().until(() ->c1.getStats().getPersistLedgerSucceed() > 0); + // Make cursor ledger can not work. + closeCursorLedger(c1); + c1.delete(PositionImpl.get(c1.getReadPosition().getLedgerId(), c1.getReadPosition().getEntryId() + 2)); + ledger.close(); + } + + private static void closeCursorLedger(ManagedCursorImpl managedCursor) { + Awaitility.await().until(() -> { + LedgerHandle ledgerHandle = Whitebox.getInternalState(managedCursor, "cursorLedger"); + if (ledgerHandle == null) { + return false; + } + ledgerHandle.close(); + return true; + }); + } + @Test(timeOut = 20000) void readFromEmptyLedger() throws Exception { ManagedLedger ledger = factory.open("my_test_ledger"); @@ -685,7 +738,7 @@ void testasyncResetCursor() throws Exception { CountDownLatch countDownLatch = new CountDownLatch(1); PositionImpl resetPosition = new PositionImpl(lastPosition.getLedgerId(), lastPosition.getEntryId() - 2); - cursor.asyncResetCursor(resetPosition, new AsyncCallbacks.ResetCursorCallback() { + cursor.asyncResetCursor(resetPosition, false, new AsyncCallbacks.ResetCursorCallback() { @Override public void resetComplete(Object ctx) { moveStatus.set(true); @@ -736,7 +789,7 @@ public 
AtomicBoolean call() throws Exception { final PositionImpl resetPosition = new PositionImpl(lastPosition.getLedgerId(), lastPosition.getEntryId() - (5 * idx)); - cursor.asyncResetCursor(resetPosition, new AsyncCallbacks.ResetCursorCallback() { + cursor.asyncResetCursor(resetPosition, false, new AsyncCallbacks.ResetCursorCallback() { @Override public void resetComplete(Object ctx) { moveStatus.set(true); @@ -770,6 +823,47 @@ public void resetFailed(ManagedLedgerException exception, Object ctx) { ledger.close(); } + @Test(timeOut = 20000) + void testLastActiveAfterResetCursor() throws Exception { + ManagedLedger ledger = factory.open("test_cursor_ledger"); + ManagedCursor cursor = ledger.openCursor("tla"); + + PositionImpl lastPosition = null; + for (int i = 0; i < 3; i++) { + lastPosition = (PositionImpl) ledger.addEntry("dummy-entry".getBytes(Encoding)); + } + + final AtomicBoolean moveStatus = new AtomicBoolean(false); + CountDownLatch countDownLatch = new CountDownLatch(1); + + long lastActive = cursor.getLastActive(); + + cursor.asyncResetCursor(lastPosition, false, new AsyncCallbacks.ResetCursorCallback() { + @Override + public void resetComplete(Object ctx) { + moveStatus.set(true); + countDownLatch.countDown(); + } + + @Override + public void resetFailed(ManagedLedgerException exception, Object ctx) { + moveStatus.set(false); + countDownLatch.countDown(); + } + }); + + countDownLatch.await(); + assertTrue(moveStatus.get()); + + assertNotNull(lastPosition); + assertEquals(lastPosition, cursor.getReadPosition()); + + assertNotEquals(lastActive, cursor.getLastActive()); + + cursor.close(); + ledger.close(); + } + @Test(timeOut = 20000) void seekPosition() throws Exception { ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(10)); @@ -2197,6 +2291,9 @@ void testFindNewestMatchingAfterLedgerRollover() throws Exception { // roll a new ledger int numLedgersBefore = ledger.getLedgersInfo().size(); 
ledger.getConfig().setMaxEntriesPerLedger(1); + Field stateUpdater = ManagedLedgerImpl.class.getDeclaredField("state"); + stateUpdater.setAccessible(true); + stateUpdater.set(ledger, ManagedLedgerImpl.State.LedgerOpened); ledger.rollCurrentLedgerIfFull(); Awaitility.await().atMost(20, TimeUnit.SECONDS) .until(() -> ledger.getLedgersInfo().size() > numLedgersBefore); @@ -2251,7 +2348,7 @@ public void findEntryFailed(ManagedLedgerException exception, Optional c1.asyncFindNewestMatching(ManagedCursor.FindPositionConstraint.SearchAllAvailableEntries, entry -> { try { - long publishTime = Long.valueOf(new String(entry.getData())); + long publishTime = Long.parseLong(new String(entry.getData())); return publishTime <= timestamp; } catch (Exception e) { log.error("Error de-serializing message for message position find", e); @@ -2442,6 +2539,11 @@ void testTrimDeletedEntries() throws ManagedLedgerException, InterruptedExceptio assertEquals(entries.size(), 1); assertEquals(entries.get(0).getPosition(), PositionImpl.get(markDeletedPosition.getLedgerId() , markDeletedPosition.getEntryId() + 7)); + + assertEquals(entry1.refCnt(), 0); + assertEquals(entry2.refCnt(), 0); + assertEquals(entry3.refCnt(), 0); + assertEquals(entry4.refCnt(), 0); } @Test(timeOut = 20000) @@ -3465,6 +3567,82 @@ public void deleteFailed(ManagedLedgerException exception, Object ctx) { }); } + + + @Test + public void testFlushCursorAfterError() throws Exception { + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setThrottleMarkDelete(1.0); + + ManagedLedgerFactoryConfig factoryConfig = new ManagedLedgerFactoryConfig(); + factoryConfig.setCursorPositionFlushSeconds(1); + + @Cleanup("shutdown") + ManagedLedgerFactory factory1 = new ManagedLedgerFactoryImpl(metadataStore, bkc, factoryConfig); + ManagedLedger ledger1 = factory1.open("testFlushCursorAfterInactivity", config); + ManagedCursor c1 = ledger1.openCursor("c"); + List positions = new ArrayList<>(); + + for (int i = 0; i < 20; i++) { 
+ positions.add(ledger1.addEntry(new byte[1024])); + } + + // Simulate BK write error + bkc.failNow(BKException.Code.NotEnoughBookiesException); + metadataStore.setAlwaysFail(new MetadataStoreException.BadVersionException("")); + + try { + c1.markDelete(positions.get(positions.size() - 1)); + fail("should have failed"); + } catch (ManagedLedgerException e) { + // Expected + } + + metadataStore.unsetAlwaysFail(); + + // In memory position is updated + assertEquals(c1.getMarkDeletedPosition(), positions.get(positions.size() - 1)); + + Awaitility.await() + // Give chance to the flush to be automatically triggered. + // NOTE: this can't be set too low, or it causes issues with ZK thread pool rejecting + .pollDelay(Duration.ofMillis(2000)) + .untilAsserted(() -> { + // Abruptly re-open the managed ledger without graceful close + @Cleanup("shutdown") + ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(metadataStore, bkc); + ManagedLedger ledger2 = factory2.open("testFlushCursorAfterInactivity", config); + ManagedCursor c2 = ledger2.openCursor("c"); + + assertEquals(c2.getMarkDeletedPosition(), positions.get(positions.size() - 1)); + }); + } + + @Test + public void testConsistencyOfIndividualMessages() throws Exception { + ManagedLedger ledger1 = factory.open("testConsistencyOfIndividualMessages"); + ManagedCursorImpl c1 = (ManagedCursorImpl) ledger1.openCursor("c"); + + PositionImpl p1 = (PositionImpl) ledger1.addEntry(new byte[1024]); + c1.markDelete(p1); + + // Artificially add a position that is before the current mark-delete position + LongPairRangeSet idm = c1.getIndividuallyDeletedMessagesSet(); + idm.addOpenClosed(p1.getLedgerId() - 1, 0, p1.getLedgerId() - 1, 10); + + List positions = new ArrayList<>(); + for (int i = 0; i < 20; i++) { + positions.add(ledger1.addEntry(new byte[1024])); + } + + for (int i = 0; i < 20; i++) { + c1.delete(positions.get(i)); + } + + assertEquals(c1.getTotalNonContiguousDeletedMessagesRange(), 0); + 
assertEquals(c1.getMarkDeletedPosition(), positions.get(positions.size() -1)); + } + @Test public void testCursorCheckReadPositionChanged() throws Exception { ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig()); @@ -3591,5 +3769,102 @@ public void testCursorNoRolloverIfNoMetadataSession() throws Exception { assertNotEquals(cursor.getCursorLedger(), initialLedgerId); } + @Test + public void testReadEmptyEntryList() throws Exception { + ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); + managedLedgerConfig.setMaxEntriesPerLedger(1); + managedLedgerConfig.setMetadataMaxEntriesPerLedger(1); + managedLedgerConfig.setMinimumRolloverTime(0, TimeUnit.MILLISECONDS); + ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory + .open("testReadEmptyEntryList", managedLedgerConfig); + ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("test"); + + PositionImpl lastPosition = (PositionImpl) ledger.addEntry("test".getBytes(Encoding)); + ledger.rollCurrentLedgerIfFull(); + + AtomicBoolean flag = new AtomicBoolean(); + flag.set(false); + ReadEntriesCallback callback = new ReadEntriesCallback() { + @Override + public void readEntriesComplete(List entries, Object ctx) { + if (entries.size() == 0) { + flag.set(true); + } + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + + } + }; + + // op readPosition is bigger than maxReadPosition + OpReadEntry opReadEntry = OpReadEntry.create(cursor, ledger.lastConfirmedEntry, 10, callback, + null, PositionImpl.get(lastPosition.getLedgerId(), -1)); + Field field = ManagedCursorImpl.class.getDeclaredField("readPosition"); + field.setAccessible(true); + field.set(cursor, PositionImpl.earliest); + ledger.asyncReadEntries(opReadEntry); + + // when readPosition is bigger than maxReadPosition, should complete the opReadEntry + Awaitility.await().untilAsserted(() -> assertTrue(flag.get())); + } + + @Test + public void testOpReadEntryRecycle() 
throws Exception { + final Map opReadEntryToRecycleCount = new ConcurrentHashMap<>(); + final Supplier createOpReadEntry = () -> { + final OpReadEntry mockedOpReadEntry = mock(OpReadEntry.class); + doAnswer(__ -> opReadEntryToRecycleCount.computeIfAbsent(mockedOpReadEntry, + ignored -> new AtomicInteger(0)).getAndIncrement() + ).when(mockedOpReadEntry).recycle(); + return mockedOpReadEntry; + }; + + PowerMockito.mockStatic(OpReadEntry.class); + PowerMockito.when(OpReadEntry.create(any(), any(), anyInt(), any(), any(), any())) + .thenAnswer(__ -> createOpReadEntry.get()); + + final ManagedLedgerConfig ledgerConfig = new ManagedLedgerConfig(); + ledgerConfig.setNewEntriesCheckDelayInMillis(10); + final ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", ledgerConfig); + final ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("my_cursor"); + final List exceptions = new ArrayList<>(); + final AtomicBoolean readEntriesSuccess = new AtomicBoolean(false); + final ReadEntriesCallback callback = new ReadEntriesCallback() { + @Override + public void readEntriesComplete(List entries, Object ctx) { + readEntriesSuccess.set(true); + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + exceptions.add(exception); + } + }; + + final int numReadRequests = 3; + for (int i = 0; i < numReadRequests; i++) { + cursor.asyncReadEntriesOrWait(1, callback, null, new PositionImpl(0, 0)); + } + Awaitility.await().atMost(Duration.ofSeconds(1)) + .untilAsserted(() -> assertEquals(ledger.waitingCursors.size(), 1)); + assertTrue(cursor.cancelPendingReadRequest()); + + ledger.addEntry(new byte[1]); + Awaitility.await().atMost(Duration.ofSeconds(1)) + .untilAsserted(() -> assertTrue(ledger.waitingCursors.isEmpty())); + assertFalse(readEntriesSuccess.get()); + + assertEquals(exceptions.size(), numReadRequests - 1); + exceptions.forEach(e -> assertEquals(e.getMessage(), "We can only have a single waiting 
callback")); + assertEquals(opReadEntryToRecycleCount.size(), 3); + assertEquals(opReadEntryToRecycleCount.entrySet().stream() + .map(Map.Entry::getValue) + .map(AtomicInteger::get) + .collect(Collectors.toList()), + Arrays.asList(1, 1, 1)); + } + private static final Logger log = LoggerFactory.getLogger(ManagedCursorTest.class); } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerErrorsTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerErrorsTest.java index 3c09a2a23875b..a093bac48b95c 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerErrorsTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerErrorsTest.java @@ -18,15 +18,19 @@ */ package org.apache.bookkeeper.mledger.impl; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; +import static org.testng.Assert.expectThrows; import static org.testng.Assert.fail; import io.netty.buffer.ByteBuf; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; import lombok.Cleanup; import org.apache.bookkeeper.client.BKException; @@ -387,6 +391,72 @@ public void recoverAfterZnodeVersionError() throws Exception { } } + @Test + public void recoverAfterZnodeVersionErrorWhileTrimming() throws Exception { + ManagedLedger ledger = factory.open("my_test_ledger_trim", + new ManagedLedgerConfig() + .setMaxEntriesPerLedger(2)); + ledger.addEntry("test".getBytes()); + ledger.addEntry("test".getBytes()); + ledger.addEntry("test".getBytes()); + + metadataStore.failConditional(new 
MetadataStoreException.BadVersionException("err"), (op, path) -> + path.equals("/managed-ledgers/my_test_ledger_trim") + && op == FaultInjectionMetadataStore.OperationType.PUT + ); + + CompletableFuture handle = new CompletableFuture<>(); + ledger.trimConsumedLedgersInBackground(handle); + assertThat(expectThrows(ExecutionException.class, () -> handle.get()).getCause(), + instanceOf(ManagedLedgerException.BadVersionException.class)); + + assertEquals(ManagedLedgerImpl.State.Fenced, ((ManagedLedgerImpl) ledger).getState()); + + // if the task started after the ML moved to Fenced state, it must fail + CompletableFuture handleAlreadyFenced = new CompletableFuture<>(); + ledger.trimConsumedLedgersInBackground(handleAlreadyFenced); + assertThat(expectThrows(ExecutionException.class, () -> handleAlreadyFenced.get()).getCause(), + instanceOf(ManagedLedgerException.ManagedLedgerFencedException.class)); + + try { + ledger.addEntry("entry".getBytes()); + fail("should fail"); + } catch (ManagedLedgerFencedException e) { + assertEquals("Attempted to use a fenced managed ledger", e.getCause().getMessage()); + } + + assertFalse(factory.ledgers.isEmpty()); + try { + ledger.close(); + } catch (ManagedLedgerFencedException e) { + assertEquals("Attempted to use a fenced managed ledger", e.getCause().getMessage()); + } + + // verify that the ManagedLedger has been unregistered even if it was fenced + assertTrue(factory.ledgers.isEmpty()); + } + + @Test + public void badVersionErrorDuringTruncateLedger() throws Exception { + ManagedLedger ledger = factory.open("my_test_ledger_trim", + new ManagedLedgerConfig() + .setMaxEntriesPerLedger(2)); + ledger.addEntry("test".getBytes()); + ledger.addEntry("test".getBytes()); + ledger.addEntry("test".getBytes()); + + metadataStore.failConditional(new MetadataStoreException.BadVersionException("err"), (op, path) -> + path.equals("/managed-ledgers/my_test_ledger_trim") + && op == FaultInjectionMetadataStore.OperationType.PUT + ); + + 
CompletableFuture handle = ledger.asyncTruncate(); + assertThat(expectThrows(ExecutionException.class, () -> handle.get()).getCause(), + instanceOf(ManagedLedgerException.BadVersionException.class)); + + assertEquals(ManagedLedgerImpl.State.Fenced, ((ManagedLedgerImpl) ledger).getState()); + } + @Test public void recoverAfterWriteError() throws Exception { ManagedLedger ledger = factory.open("my_test_ledger"); diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerInfoMetadataTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerInfoMetadataTest.java index 2f27489aeb9f6..91bc7f143a4ae 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerInfoMetadataTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerInfoMetadataTest.java @@ -19,6 +19,12 @@ package org.apache.bookkeeper.mledger.impl; import com.google.protobuf.InvalidProtocolBufferException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.offload.OffloadUtils; import org.apache.bookkeeper.mledger.proto.MLDataFormats; @@ -28,13 +34,6 @@ import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; - /** * ManagedLedgerInfo metadata test. 
*/ @@ -91,7 +90,7 @@ public void testEncodeAndDecode(String compressionType) throws IOException { MetaStoreImpl metaStore; try { - metaStore = new MetaStoreImpl(null, null, compressionType); + metaStore = new MetaStoreImpl(null, null, compressionType, null); if ("INVALID_TYPE".equals(compressionType)) { Assert.fail("The managedLedgerInfo compression type is invalid, should fail."); } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java index 4358c2f73620b..faad15933f8fd 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java @@ -18,6 +18,7 @@ */ package org.apache.bookkeeper.mledger.impl; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; @@ -52,8 +53,10 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.NavigableMap; import java.util.Optional; import java.util.Set; +import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; @@ -61,6 +64,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; @@ -120,6 +124,7 @@ import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.metadata.api.Stat; import org.awaitility.Awaitility; +import org.mockito.Mockito; import org.testng.Assert; import 
org.testng.annotations.DataProvider; import org.testng.annotations.Test; @@ -405,6 +410,33 @@ public void spanningMultipleLedgers() throws Exception { ledger.close(); } + @Test + public void testStartReadOperationOnLedgerWithEmptyLedgers() throws ManagedLedgerException, InterruptedException { + ManagedLedger ledger = factory.open("my_test_ledger_1"); + ManagedLedgerImpl ledgerImpl = (ManagedLedgerImpl) ledger; + NavigableMap ledgers = ledgerImpl.getLedgersInfo(); + LedgerInfo ledgerInfo = ledgers.firstEntry().getValue(); + ledgers.clear(); + ManagedCursor c1 = ledger.openCursor("c1"); + PositionImpl position = new PositionImpl(ledgerInfo.getLedgerId(), 0); + PositionImpl maxPosition = new PositionImpl(ledgerInfo.getLedgerId(), 99); + OpReadEntry opReadEntry = OpReadEntry.create((ManagedCursorImpl) c1, position, 20, + new ReadEntriesCallback() { + + @Override + public void readEntriesComplete(List entries, Object ctx) { + + } + + @Override + public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + + } + }, null, maxPosition); + Assert.assertEquals(opReadEntry.readPosition, position); + } + + @Test(timeOut = 20000) public void spanningMultipleLedgersWithSize() throws Exception { ManagedLedgerConfig config = new ManagedLedgerConfig().setMaxEntriesPerLedger(1000000); @@ -1935,6 +1967,9 @@ public void testDeletionAfterLedgerClosedAndRetention() throws Exception { c1.skipEntries(1, IndividualDeletedEntries.Exclude); c2.skipEntries(1, IndividualDeletedEntries.Exclude); // let current ledger close + Field stateUpdater = ManagedLedgerImpl.class.getDeclaredField("state"); + stateUpdater.setAccessible(true); + stateUpdater.set(ml, ManagedLedgerImpl.State.LedgerOpened); ml.rollCurrentLedgerIfFull(); // let retention expire Thread.sleep(1500); @@ -2204,6 +2239,9 @@ public void testGetPositionAfterN() throws Exception { managedCursor.markDelete(positionMarkDelete); //trigger ledger rollover and wait for the new ledger created + Field stateUpdater = 
ManagedLedgerImpl.class.getDeclaredField("state"); + stateUpdater.setAccessible(true); + stateUpdater.set(managedLedger, ManagedLedgerImpl.State.LedgerOpened); managedLedger.rollCurrentLedgerIfFull(); Awaitility.await().untilAsserted(() -> assertEquals(managedLedger.getLedgersInfo().size(), 3)); assertEquals(5, managedLedger.getLedgersInfoAsList().get(0).getEntries()); @@ -2232,6 +2270,52 @@ public void testGetPositionAfterN() throws Exception { log.info("Target position is {}", targetPosition); assertEquals(targetPosition.getLedgerId(), secondLedger); assertEquals(targetPosition.getEntryId(), 4); + + // test for n > NumberOfEntriesInStorage + searchPosition = new PositionImpl(secondLedger, 0); + targetPosition = managedLedger.getPositionAfterN(searchPosition, 100, ManagedLedgerImpl.PositionBound.startIncluded); + assertEquals(targetPosition.getLedgerId(), secondLedger); + assertEquals(targetPosition.getEntryId(), 4); + + // test for startPosition > current ledger + searchPosition = new PositionImpl(999, 0); + targetPosition = managedLedger.getPositionAfterN(searchPosition, 0, ManagedLedgerImpl.PositionBound.startIncluded); + assertEquals(targetPosition.getLedgerId(), secondLedger); + assertEquals(targetPosition.getEntryId(), 4); + + searchPosition = new PositionImpl(999, 0); + targetPosition = managedLedger.getPositionAfterN(searchPosition, 10, ManagedLedgerImpl.PositionBound.startExcluded); + assertEquals(targetPosition.getLedgerId(), secondLedger); + assertEquals(targetPosition.getEntryId(), 4); + } + + @Test + public void testGetNumberOfEntriesInStorage() throws Exception { + ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); + managedLedgerConfig.setMaxEntriesPerLedger(5); + ManagedLedgerImpl managedLedger = + (ManagedLedgerImpl) factory.open("testGetNumberOfEntriesInStorage", managedLedgerConfig); + // open cursor to prevent ledger to be deleted when ledger rollover + ManagedCursorImpl managedCursor = (ManagedCursorImpl) 
managedLedger.openCursor("cursor"); + int numberOfEntries = 10; + for (int i = 0; i < numberOfEntries; i++) { + managedLedger.addEntry(("entry-" + i).getBytes(Encoding)); + } + + //trigger ledger rollover and wait for the new ledger created + Field stateUpdater = ManagedLedgerImpl.class.getDeclaredField("state"); + stateUpdater.setAccessible(true); + stateUpdater.set(managedLedger, ManagedLedgerImpl.State.LedgerOpened); + managedLedger.rollCurrentLedgerIfFull(); + Awaitility.await().untilAsserted(() -> { + assertEquals(managedLedger.getLedgersInfo().size(), 3); + assertEquals(managedLedger.getState(), ManagedLedgerImpl.State.LedgerOpened); + }); + assertEquals(5, managedLedger.getLedgersInfoAsList().get(0).getEntries()); + assertEquals(5, managedLedger.getLedgersInfoAsList().get(1).getEntries()); + log.info("### ledgers {}", managedLedger.getLedgersInfo()); + long length = managedCursor.getNumberOfEntriesInStorage(); + assertEquals(length, numberOfEntries); } @Test @@ -2829,7 +2913,7 @@ public void avoidUseSameOpAddEntryBetweenDifferentLedger() throws Exception { List oldOps = new ArrayList<>(); for (int i = 0; i < 10; i++) { - OpAddEntry op = OpAddEntry.create(ledger, ByteBufAllocator.DEFAULT.buffer(128), null, null); + OpAddEntry op = OpAddEntry.createNoRetainBuffer(ledger, ByteBufAllocator.DEFAULT.buffer(128).retain(), null, null); if (i > 4) { op.setLedger(mock(LedgerHandle.class)); } @@ -3062,7 +3146,7 @@ public void testManagedLedgerRollOverIfFull() throws Exception { ledger.addEntry(new byte[1024 * 1024]); } - Assert.assertEquals(ledger.getLedgersInfoAsList().size(), msgNum / 2); + Awaitility.await().untilAsserted(() -> Assert.assertEquals(ledger.getLedgersInfoAsList().size(), msgNum / 2)); List entries = cursor.readEntries(msgNum); Assert.assertEquals(msgNum, entries.size()); @@ -3073,9 +3157,12 @@ public void testManagedLedgerRollOverIfFull() throws Exception { // all the messages have benn acknowledged // and all the ledgers have been removed except the 
last ledger - Thread.sleep(1000); - Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 1); - Assert.assertEquals(ledger.getTotalSize(), 0); + Field stateUpdater = ManagedLedgerImpl.class.getDeclaredField("state"); + stateUpdater.setAccessible(true); + stateUpdater.set(ledger, ManagedLedgerImpl.State.LedgerOpened); + ledger.rollCurrentLedgerIfFull(); + Awaitility.await().untilAsserted(() -> Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 1)); + Awaitility.await().untilAsserted(() -> Assert.assertEquals(ledger.getTotalSize(), 0)); } @Test @@ -3093,6 +3180,26 @@ public void testLedgerReachMaximumRolloverTime() throws Exception { .until(() -> firstLedgerId != ml.addEntry("test".getBytes()).getLedgerId()); } + @Test + public void testLedgerNotRolloverWithoutOpenState() throws Exception { + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setMaxEntriesPerLedger(2); + + ManagedLedgerImpl ml = spy((ManagedLedgerImpl)factory.open("ledger-not-rollover-without-open-state", config)); + ml.addEntry("test1".getBytes()).getLedgerId(); + long ledgerId2 = ml.addEntry("test2".getBytes()).getLedgerId(); + Field stateUpdater = ManagedLedgerImpl.class.getDeclaredField("state"); + stateUpdater.setAccessible(true); + // Set state to CreatingLedger to avoid rollover + stateUpdater.set(ml, ManagedLedgerImpl.State.CreatingLedger); + ml.rollCurrentLedgerIfFull(); + Field currentLedger = ManagedLedgerImpl.class.getDeclaredField("currentLedger"); + currentLedger.setAccessible(true); + LedgerHandle lh = (LedgerHandle) currentLedger.get(ml); + Awaitility.await() + .until(() -> ledgerId2 == lh.getId()); + } + @Test public void testExpiredLedgerDeletionAfterManagedLedgerRestart() throws Exception { ManagedLedgerConfig config = new ManagedLedgerConfig(); @@ -3360,4 +3467,116 @@ public void readEntryFailed(ManagedLedgerException exception, Object ctx) { managedLedgerB.close(); } + + @Test + public void testCancellationOfScheduledTasks() throws Exception { + Field 
timeoutTaskField = ManagedLedgerImpl.class.getDeclaredField("timeoutTask"); + timeoutTaskField.setAccessible(true); + Field checkLedgerRollTaskField = ManagedLedgerImpl.class.getDeclaredField("checkLedgerRollTask"); + checkLedgerRollTaskField.setAccessible(true); + + ManagedLedgerImpl ledger1 = (ManagedLedgerImpl) factory.open("my_test_ledger_1"); + ledger1.addEntry("dummy-entry-1".getBytes(Encoding)); + ScheduledFuture timeoutTask1 = (ScheduledFuture) timeoutTaskField.get(ledger1); + assertNotNull(timeoutTask1); + assertFalse(timeoutTask1.isDone()); + ScheduledFuture checkLedgerRollTask1 = (ScheduledFuture) checkLedgerRollTaskField.get(ledger1); + assertNotNull(checkLedgerRollTask1); + assertFalse(checkLedgerRollTask1.isDone()); + ledger1.close(); + assertTrue(timeoutTask1.isCancelled()); + assertTrue(checkLedgerRollTask1.isCancelled()); + + ManagedLedgerImpl ledger2 = (ManagedLedgerImpl) factory.open("my_test_ledger_2"); + ledger2.addEntry("dummy-entry-2".getBytes(Encoding)); + ScheduledFuture timeoutTask2 = (ScheduledFuture) timeoutTaskField.get(ledger2); + assertNotNull(timeoutTask2); + assertFalse(timeoutTask2.isDone()); + ScheduledFuture checkLedgerRollTask2 = (ScheduledFuture) checkLedgerRollTaskField.get(ledger2); + assertNotNull(checkLedgerRollTask2); + assertFalse(checkLedgerRollTask2.isDone()); + ledger2.delete(); + assertTrue(timeoutTask2.isCancelled()); + assertTrue(checkLedgerRollTask2.isCancelled()); + } + + @Test + public void testOffloadTaskCancelled() throws Exception { + ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(metadataStore, bkc); + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setMaxEntriesPerLedger(2); + config.setMinimumRolloverTime(0, TimeUnit.SECONDS); + + OffloadPoliciesImpl offloadPolicies = new OffloadPoliciesImpl(); + offloadPolicies.setManagedLedgerOffloadDriver("mock"); + offloadPolicies.setManagedLedgerOffloadThresholdInBytes(0L); + LedgerOffloader ledgerOffloader = 
Mockito.mock(LedgerOffloader.class); + Mockito.when(ledgerOffloader.getOffloadPolicies()).thenReturn(offloadPolicies); + Mockito.when(ledgerOffloader.getOffloadDriverName()).thenReturn(offloadPolicies.getManagedLedgerOffloadDriver()); + config.setLedgerOffloader(ledgerOffloader); + + CompletableFuture readHandle = new CompletableFuture<>(); + readHandle.complete(mock(ReadHandle.class)); + + CompletableFuture offloadFuture = new CompletableFuture<>(); + offloadFuture.complete(null); + Mockito.when(ledgerOffloader.offload(any(ReadHandle.class), any(UUID.class), any(Map.class))).thenReturn(offloadFuture); + + final ManagedLedgerImpl ledgerInit = (ManagedLedgerImpl) factory.open("test-offload-task-close", config); + final ManagedLedgerImpl ledger = spy(ledgerInit); + long ledgerId = 3L; + doReturn(readHandle).when(ledger).getLedgerHandle(ledgerId); + doReturn(ManagedLedgerImpl.State.Closed).when(ledger).getState(); + ledger.addEntry("dummy-entry-1".getBytes(Encoding)); + ledger.addEntry("dummy-entry-2".getBytes(Encoding)); + ledger.addEntry("dummy-entry-3".getBytes(Encoding)); + ledger.close(); + + Awaitility.await().untilAsserted(() -> { + CompletableFuture ledgerInfo = ledger.getLedgerInfo(ledgerId); + Assert.assertFalse(ledgerInfo.get(100, TimeUnit.MILLISECONDS).getOffloadContext().getComplete()); + }); + } + + @Test + public void testGetTheSlowestNonDurationReadPosition() throws Exception { + ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("test_", + new ManagedLedgerConfig().setMaxEntriesPerLedger(1).setRetentionTime(-1, TimeUnit.SECONDS) + .setRetentionSizeInMB(-1)); + ledger.openCursor("c1"); + + List positions = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + positions.add(ledger.addEntry(("entry-" + i).getBytes(UTF_8))); + } + + Assert.assertEquals(ledger.getTheSlowestNonDurationReadPosition(), PositionImpl.latest); + + ManagedCursor nonDurableCursor = ledger.newNonDurableCursor(PositionImpl.earliest); + + 
Assert.assertEquals(ledger.getTheSlowestNonDurationReadPosition(), positions.get(0)); + + ledger.deleteCursor(nonDurableCursor.getName()); + + Assert.assertEquals(ledger.getTheSlowestNonDurationReadPosition(), PositionImpl.latest); + + ledger.close(); + } + + @Test + public void testGetLedgerMetadata() throws Exception { + ManagedLedgerImpl managedLedger = (ManagedLedgerImpl) factory.open("testGetLedgerMetadata"); + long lastLedger = managedLedger.ledgers.lastEntry().getKey(); + managedLedger.getLedgerMetadata(lastLedger); + Assert.assertFalse(managedLedger.ledgerCache.containsKey(lastLedger)); + } + + @Test + public void testGetEnsemblesAsync() throws Exception { + // test getEnsemblesAsync of latest ledger will not open it twice and put it in ledgerCache. + ManagedLedgerImpl managedLedger = (ManagedLedgerImpl) factory.open("testGetLedgerMetadata"); + long lastLedger = managedLedger.ledgers.lastEntry().getKey(); + managedLedger.getEnsemblesAsync(lastLedger).join(); + Assert.assertFalse(managedLedger.ledgerCache.containsKey(lastLedger)); + } } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java index 4c2944fd79605..18b09793e26f8 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/NonDurableCursorTest.java @@ -31,6 +31,7 @@ import com.google.common.collect.Lists; import java.nio.charset.Charset; +import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; @@ -54,6 +55,7 @@ import org.apache.bookkeeper.test.MockedBookKeeperTestCase; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.testng.Assert; import org.testng.annotations.Test; public class NonDurableCursorTest extends MockedBookKeeperTestCase { @@ -371,7 
+373,7 @@ void testasyncResetCursor() throws Exception { CountDownLatch countDownLatch = new CountDownLatch(1); PositionImpl resetPosition = new PositionImpl(lastPosition.getLedgerId(), lastPosition.getEntryId() - 2); - cursor.asyncResetCursor(resetPosition, new AsyncCallbacks.ResetCursorCallback() { + cursor.asyncResetCursor(resetPosition, false, new AsyncCallbacks.ResetCursorCallback() { @Override public void resetComplete(Object ctx) { moveStatus.set(true); @@ -735,6 +737,64 @@ public void testBacklogStatsWhenDroppingData() throws Exception { ledger.close(); } + @Test + public void testInvalidateReadHandleWithSlowNonDurableCursor() throws Exception { + ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("testInvalidateReadHandleWithSlowNonDurableCursor", + new ManagedLedgerConfig().setMaxEntriesPerLedger(1).setRetentionTime(-1, TimeUnit.SECONDS) + .setRetentionSizeInMB(-1)); + ManagedCursor c1 = ledger.openCursor("c1"); + ManagedCursor nonDurableCursor = ledger.newNonDurableCursor(PositionImpl.earliest); + + List positions = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + positions.add(ledger.addEntry(("entry-" + i).getBytes(UTF_8))); + } + + CountDownLatch latch = new CountDownLatch(10); + for (int i = 0; i < 10; i++) { + ledger.asyncReadEntry((PositionImpl) positions.get(i), new AsyncCallbacks.ReadEntryCallback() { + @Override + public void readEntryComplete(Entry entry, Object ctx) { + latch.countDown(); + } + + @Override + public void readEntryFailed(ManagedLedgerException exception, Object ctx) { + latch.countDown(); + } + }, null); + } + + latch.await(); + + c1.markDelete(positions.get(4)); + + CompletableFuture promise = new CompletableFuture<>(); + ledger.internalTrimConsumedLedgers(promise); + promise.join(); + + Assert.assertTrue(ledger.ledgerCache.containsKey(positions.get(0).getLedgerId())); + Assert.assertTrue(ledger.ledgerCache.containsKey(positions.get(1).getLedgerId())); + 
Assert.assertTrue(ledger.ledgerCache.containsKey(positions.get(2).getLedgerId())); + Assert.assertTrue(ledger.ledgerCache.containsKey(positions.get(3).getLedgerId())); + Assert.assertTrue(ledger.ledgerCache.containsKey(positions.get(4).getLedgerId())); + + promise = new CompletableFuture<>(); + + nonDurableCursor.markDelete(positions.get(3)); + + ledger.internalTrimConsumedLedgers(promise); + promise.join(); + + Assert.assertFalse(ledger.ledgerCache.containsKey(positions.get(0).getLedgerId())); + Assert.assertFalse(ledger.ledgerCache.containsKey(positions.get(1).getLedgerId())); + Assert.assertFalse(ledger.ledgerCache.containsKey(positions.get(2).getLedgerId())); + Assert.assertFalse(ledger.ledgerCache.containsKey(positions.get(3).getLedgerId())); + Assert.assertTrue(ledger.ledgerCache.containsKey(positions.get(4).getLedgerId())); + + ledger.close(); + } + @Test(expectedExceptions = NullPointerException.class) void testCursorWithNameIsNotNull() throws Exception { final String p1CursorName = "entry-1"; diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixReadTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixReadTest.java index 426a00db25ec8..e761b04b79b3e 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixReadTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixReadTest.java @@ -66,7 +66,7 @@ public class OffloadPrefixReadTest extends MockedBookKeeperTestCase { @Test public void testOffloadRead() throws Exception { - MockLedgerOffloader offloader = spy(new MockLedgerOffloader()); + MockLedgerOffloader offloader = spy(MockLedgerOffloader.class); ManagedLedgerConfig config = new ManagedLedgerConfig(); config.setMaxEntriesPerLedger(10); config.setMinimumRolloverTime(0, TimeUnit.SECONDS); @@ -122,7 +122,7 @@ public void testOffloadRead() throws Exception { @Test public void testBookkeeperFirstOffloadRead() throws 
Exception { - MockLedgerOffloader offloader = spy(new MockLedgerOffloader()); + MockLedgerOffloader offloader = spy(MockLedgerOffloader.class); MockClock clock = new MockClock(); offloader.getOffloadPolicies() .setManagedLedgerOffloadedReadPriority(OffloadedReadPriority.BOOKKEEPER_FIRST); diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixTest.java index a7092e4ec46f6..8d0312980a096 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/OffloadPrefixTest.java @@ -21,6 +21,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import com.google.common.collect.ImmutableSet; @@ -49,6 +50,8 @@ import org.apache.bookkeeper.test.MockedBookKeeperTestCase; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.common.policies.data.OffloadPoliciesImpl; +import org.apache.pulsar.metadata.api.MetadataStoreException; +import org.apache.pulsar.metadata.impl.FaultInjectionMetadataStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.Test; @@ -85,7 +88,7 @@ public void testNullOffloader() throws Exception { ledger.offloadPrefix(p); fail("Should have thrown an exception"); } catch (ManagedLedgerException e) { - assertEquals(e.getCause().getClass(), CompletionException.class); + assertEquals(e.getMessage(), "NullLedgerOffloader"); } assertEquals(ledger.getLedgersInfoAsList().size(), 5); assertEquals(ledger.getLedgersInfoAsList().stream() @@ -126,6 +129,51 @@ public void testOffload() throws Exception { .filter(e -> e.getOffloadContext().getComplete()) .map(e -> 
e.getLedgerId()).collect(Collectors.toSet()), offloader.offloadedLedgers()); + + // ledgers should be marked as offloaded + ledger.getLedgersInfoAsList().stream().allMatch(l -> l.hasOffloadContext()); + } + + @Test + public void testOffloadFenced() throws Exception { + MockLedgerOffloader offloader = new MockLedgerOffloader(); + ManagedLedgerConfig config = new ManagedLedgerConfig(); + config.setMaxEntriesPerLedger(10); + config.setMinimumRolloverTime(0, TimeUnit.SECONDS); + config.setRetentionTime(10, TimeUnit.MINUTES); + config.setRetentionSizeInMB(10); + config.setLedgerOffloader(offloader); + ManagedLedgerImpl ledger = (ManagedLedgerImpl)factory.open("my_test_ledger", config); + + int i = 0; + for (; i < 25; i++) { + String content = "entry-" + i; + ledger.addEntry(content.getBytes()); + } + assertEquals(ledger.getLedgersInfoAsList().size(), 3); + + metadataStore.failConditional(new MetadataStoreException.BadVersionException("err"), (op, path) -> + path.equals("/managed-ledgers/my_test_ledger") + && op == FaultInjectionMetadataStore.OperationType.PUT + ); + + assertThrows(ManagedLedgerException.ManagedLedgerFencedException.class, () -> + ledger.offloadPrefix(ledger.getLastConfirmedEntry())); + + assertEquals(ledger.getLedgersInfoAsList().size(), 3); + + // the offloader actually wrote the data on the storage + assertEquals(ledger.getLedgersInfoAsList().stream() + .filter(e -> e.getOffloadContext().getComplete()) + .map(e -> e.getLedgerId()).collect(Collectors.toSet()), + offloader.offloadedLedgers()); + + // but the ledgers should not be marked as offloaded in local memory, as the write to metadata failed + ledger.getLedgersInfoAsList().stream().allMatch(l -> !l.hasOffloadContext()); + + // check that the ledger is fenced + assertEquals(ManagedLedgerImpl.State.Fenced, ledger.getState()); + } @Test diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/util/RangeCacheTest.java 
b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/util/RangeCacheTest.java index 95896d24f35f2..f31aa4a74f9d1 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/util/RangeCacheTest.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/util/RangeCacheTest.java @@ -29,11 +29,15 @@ import io.netty.util.ReferenceCounted; import org.apache.commons.lang3.tuple.Pair; import org.testng.annotations.Test; +import java.util.UUID; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; public class RangeCacheTest { class RefString extends AbstractReferenceCounted implements ReferenceCounted { - final String s; + String s; RefString(String s) { super(); @@ -43,7 +47,7 @@ class RefString extends AbstractReferenceCounted implements ReferenceCounted { @Override protected void deallocate() { - // no-op + s = null; } @Override @@ -122,6 +126,7 @@ public void customWeighter() { assertEquals(cache.getNumberOfEntries(), 2); } + @Test public void customTimeExtraction() { RangeCache cache = new RangeCache<>(value -> value.s.length(), x -> x.s.length()); @@ -268,4 +273,24 @@ public void evictions() { assertEquals((long) res.getRight(), 10); assertEquals(cache.getSize(), 90); } + + @Test + public void testInParallel() { + RangeCache cache = new RangeCache<>(value -> value.s.length(), x -> 0); + ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + executor.scheduleWithFixedDelay(cache::clear, 10, 10, TimeUnit.MILLISECONDS); + for (int i = 0; i < 1000; i++) { + cache.put(UUID.randomUUID().toString(), new RefString("zero")); + } + executor.shutdown(); + } + + @Test + public void testPutSameObj() { + RangeCache cache = new RangeCache<>(value -> value.s.length(), x -> 0); + RefString s0 = new RefString("zero"); + assertEquals(s0.refCnt(), 1); + assertTrue(cache.put(0, s0)); + assertFalse(cache.put(0, s0)); + } } diff --git 
a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/util/TestStatsBuckets.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/util/TestStatsBuckets.java index 02366632ed069..12efbb054ded6 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/util/TestStatsBuckets.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/util/TestStatsBuckets.java @@ -99,4 +99,17 @@ public void test() { assertEquals(stats.getCount(), 3); assertEquals(stats.getBuckets(), new long[] { 1, 0, 1, 1 }); } + + @Test + public void testAddAll() { + StatsBuckets stats = new StatsBuckets(10, 20, 30); + stats.addValue(1); + stats.addValue(2); + stats.refresh(); + StatsBuckets stats2 = new StatsBuckets(10, 20, 30); + stats2.addAll(stats); + stats2.refresh(); + assertEquals(stats2.getSum(),3); + assertEquals(stats2.getCount(),2); + } } diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/test/BookKeeperClusterTestCase.java b/managed-ledger/src/test/java/org/apache/bookkeeper/test/BookKeeperClusterTestCase.java index 006c287610c93..105405fe41c2c 100644 --- a/managed-ledger/src/test/java/org/apache/bookkeeper/test/BookKeeperClusterTestCase.java +++ b/managed-ledger/src/test/java/org/apache/bookkeeper/test/BookKeeperClusterTestCase.java @@ -228,7 +228,7 @@ protected ServerConfiguration newServerConfiguration(int port, String zkServers, conf.setBookiePort(port); if (ledgerRootPath != "") { conf.setMetadataServiceUri("zk://" + zkUtil.getZooKeeperConnectString() + ledgerRootPath); - }else { + } else { conf.setZkServers(zkServers); } conf.setJournalDirName(journalDir.getPath()); diff --git a/pom.xml b/pom.xml index 185b391cf7daa..7fdfd201e7c7d 100644 --- a/pom.xml +++ b/pom.xml @@ -32,7 +32,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 Pulsar Pulsar is a distributed pub-sub messaging platform with a very @@ -80,7 +80,7 @@ flexible messaging model and an intuitive client API. 
8 8 - + **/Test*.java,**/*Test.java,**/*Tests.java,**/*TestCase.java @@ -102,62 +102,66 @@ flexible messaging model and an intuitive client API. 1.21 - - 4.14.2 + 1.31 + 4.14.5 3.6.3 + 1.5.0 1.1.7 3.2.5 5.1.0 - 4.1.68.Final - 2.0.42.Final - 9.4.43.v20210629 + 4.1.77.Final + 2.0.52.Final + 9.4.48.v20220622 2.5.2 2.34 1.10.9 0.5.0 3.9.8 - 6.10.2 - 1.7.25 + 6.16.4 + 1.7.32 3.2.2 - 2.14.0 + 2.18.0 1.69 1.0.2 - 2.12.3 - 2.12.3 + 2.13.4 0.9.11 1.6.2 8.37 1.4.13 0.5.0 - 3.11.4 + 3.19.6 ${protobuf3.version} - 1.33.0 + 1.45.1 + 1.41.0 0.19.0 ${grpc.version} - 2.8.6 + 2.8.9 + 1.2.1 0.8.3 2.2.0 3.6.0 - 4.4.8 - 2.7.0 + 4.4.20 + 2.7.2 5.1.1 1.11.774 1.10.2 2.10.5 - 2.3.0 + 2.5.0 + 5.1.0 3.8.11.2 8.0.11 - 42.2.12 - 0.2.4 + 42.4.1 + 0.3.2 2.6.0 - 3.3.0 + 3.3.3 7.9.1 332 2.13 2.13.6 - 1.7.0.Final + 1.7.1.Final + 42.4.1 0.11.1 - 0.18.0 + 0.28.0 2.3.0 30.1-jre 1.0 @@ -174,7 +178,6 @@ flexible messaging model and an intuitive client API. 2.8.0 1.15 2.1 - 1.2.17 2.1.9 3.1.0 2.9.1 @@ -183,7 +186,7 @@ flexible messaging model and an intuitive client API. 0.7.3 2.1.0 3.18.1 - 1.18.20 + 1.18.22 1.3.2 2.3.1 1.2.0 @@ -192,14 +195,17 @@ flexible messaging model and an intuitive client API. 2.0.2 4.2.0 12.0.1 - 3.14.9 + 4.9.3 - 1.17.2 + 2.8.0 + + 1.4.32 1.0 - 9.1.3 - 5.3.1 + 9.1.6 + 5.3.19 4.5.13 - + 5.3.3 + 2.0.6 3.6.0 3.4.0 @@ -211,13 +217,14 @@ flexible messaging model and an intuitive client API. 1.1.1 7.3.0 4.13.1 - 3.8.0 + 3.12.4 2.0.9 3.25.0-GA 2.3.1 1.5.0 3.1 4.0.3 + 1.5.1 0.6.1 @@ -247,7 +254,8 @@ flexible messaging model and an intuitive client API. 0.1.4 1.3 0.4 - 6.1.6 + 7.1.0 + 0.9.15 rename-netty-native-libs.sh @@ -265,6 +273,10 @@ flexible messaging model and an intuitive client API. io.netty * + + com.typesafe.netty + netty-reactive-streams + @@ -338,6 +350,11 @@ flexible messaging model and an intuitive client API. 
zookeeper-jute ${zookeeper.version} + + commons-cli + commons-cli + ${commons-cli.version} + io.dropwizard.metrics metrics-core @@ -624,6 +641,18 @@ flexible messaging model and an intuitive client API. ${guava.version} + + com.google.inject + guice + ${guice.version} + + + + com.google.inject.extensions + guice-assistedinject + ${guice.version} + + org.apache.commons commons-lang3 @@ -771,15 +800,9 @@ flexible messaging model and an intuitive client API. - log4j - log4j - ${log4j.version} - - - com.sun.jmx - jmxri - - + org.codehaus.jettison + jettison + ${jettison.version} @@ -922,6 +945,14 @@ flexible messaging model and an intuitive client API. ${typetools.version} + + io.grpc + grpc-bom + ${grpc.version} + pom + import + + io.grpc grpc-all @@ -947,9 +978,21 @@ flexible messaging model and an intuitive client API. - io.grpc - grpc-core - ${grpc.version} + com.google.http-client + google-http-client + ${google-http-client.version} + + + + com.google.http-client + google-http-client-jackson2 + ${google-http-client.version} + + + + com.google.http-client + google-http-client-gson + ${google-http-client.version} @@ -965,18 +1008,6 @@ flexible messaging model and an intuitive client API. - - io.grpc - grpc-stub - ${grpc.version} - - - - io.grpc - grpc-protobuf-lite - ${grpc.version} - - com.google.protobuf protobuf-bom @@ -1102,6 +1133,12 @@ flexible messaging model and an intuitive client API. ${opencensus.version} + + io.opencensus + opencensus-contrib-http-util + ${opencensus.version} + + io.opencensus opencensus-contrib-grpc-metrics @@ -1186,12 +1223,52 @@ flexible messaging model and an intuitive client API. 
okhttp-urlconnection ${okhttp3.version} + + com.squareup.okhttp3 + logging-interceptor + ${okhttp3.version} + com.squareup.okio okio ${okio.version} + + org.jetbrains.kotlin + kotlin-stdlib + ${kotlin-stdlib.version} + + + org.jetbrains.kotlin + kotlin-stdlib-common + ${kotlin-stdlib.version} + + + + org.jetbrains.kotlin + kotlin-stdlib-jdk8 + ${kotlin-stdlib.version} + + + + com.typesafe.netty + netty-reactive-streams + ${netty-reactive-streams.version} + + + + org.roaringbitmap + RoaringBitmap + ${roaringbitmap.version} + + + + org.yaml + snakeyaml + ${snakeyaml.version} + + @@ -1222,6 +1299,13 @@ flexible messaging model and an intuitive client API. test + + com.github.stefanbirkner + system-lambda + ${system-lambda.version} + test + + org.powermock powermock-module-testng @@ -2294,6 +2378,14 @@ flexible messaging model and an intuitive client API. false + + maven.restlet.org + maven.restlet.org + https://maven.restlet.talend.com + + false + + diff --git a/pulsar-broker-auth-athenz/pom.xml b/pulsar-broker-auth-athenz/pom.xml index bbec5e0084dcf..f42eb59f5a91f 100644 --- a/pulsar-broker-auth-athenz/pom.xml +++ b/pulsar-broker-auth-athenz/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 pulsar-broker-auth-athenz diff --git a/pulsar-broker-auth-sasl/pom.xml b/pulsar-broker-auth-sasl/pom.xml index 13dd14aa8f58a..c82d2219a47b2 100644 --- a/pulsar-broker-auth-sasl/pom.xml +++ b/pulsar-broker-auth-sasl/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 pulsar-broker-auth-sasl diff --git a/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/ProxySaslAuthenticationTest.java b/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/ProxySaslAuthenticationTest.java index aabdcf81df387..e7b2c4411fdbe 100644 --- a/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/ProxySaslAuthenticationTest.java +++ 
b/pulsar-broker-auth-sasl/src/test/java/org/apache/pulsar/broker/authentication/ProxySaslAuthenticationTest.java @@ -224,6 +224,7 @@ void testAuthentication() throws Exception { ProxyConfiguration proxyConfig = new ProxyConfiguration(); proxyConfig.setAuthenticationEnabled(true); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setBrokerServiceURL(pulsar.getBrokerServiceUrl()); proxyConfig.setSaslJaasClientAllowedIds(".*" + localHostname + ".*"); diff --git a/pulsar-broker-common/pom.xml b/pulsar-broker-common/pom.xml index 91179a1bd8f6f..b675405677541 100644 --- a/pulsar-broker-common/pom.xml +++ b/pulsar-broker-common/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 pulsar-broker-common diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ClassLoaderSwitcher.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ClassLoaderSwitcher.java new file mode 100644 index 0000000000000..787182ef012bc --- /dev/null +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ClassLoaderSwitcher.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker; + +/** + * Help to switch the class loader of current thread to the NarClassLoader, and change it back when it's done. + * With the help of try-with-resources statement, the code would be cleaner than using try finally every time. + */ +public class ClassLoaderSwitcher implements AutoCloseable { + private final ClassLoader prevClassLoader; + + public ClassLoaderSwitcher(ClassLoader classLoader) { + prevClassLoader = Thread.currentThread().getContextClassLoader(); + Thread.currentThread().setContextClassLoader(classLoader); + } + + @Override + public void close() { + Thread.currentThread().setContextClassLoader(prevClassLoader); + } +} \ No newline at end of file diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java index 4c7ee850ad195..aed35bdad41f3 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java @@ -28,6 +28,7 @@ import java.util.Optional; import java.util.Properties; import java.util.Set; +import java.util.TreeSet; import java.util.concurrent.TimeUnit; import lombok.Getter; import lombok.Setter; @@ -144,6 +145,26 @@ public class ServiceConfiguration implements PulsarConfiguration { ) private Optional webServicePortTls = Optional.empty(); + @FieldContext( + category = CATEGORY_SERVER, + doc = "Specify the TLS provider for the web service: SunJSSE, Conscrypt and etc." 
+ ) + private String webServiceTlsProvider = "Conscrypt"; + + @FieldContext( + category = CATEGORY_TLS, + doc = "Specify the tls protocols the proxy's web service will use to negotiate during TLS Handshake.\n\n" + + "Example:- [TLSv1.3, TLSv1.2]" + ) + private Set webServiceTlsProtocols = new TreeSet<>(); + + @FieldContext( + category = CATEGORY_TLS, + doc = "Specify the tls cipher the proxy's web service will use to negotiate during TLS Handshake.\n\n" + + "Example:- [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]" + ) + private Set webServiceTlsCiphers = new TreeSet<>(); + @FieldContext( category = CATEGORY_SERVER, doc = "Hostname or IP address the service binds on" @@ -253,9 +274,26 @@ public class ServiceConfiguration implements PulsarConfiguration { private String delayedDeliveryTrackerFactoryClassName = "org.apache.pulsar.broker.delayed.InMemoryDelayedDeliveryTrackerFactory"; @FieldContext(category = CATEGORY_SERVER, doc = "Control the tick time for when retrying on delayed delivery, " - + " affecting the accuracy of the delivery time compared to the scheduled time. Default is 1 second.") + + "affecting the accuracy of the delivery time compared to the scheduled time. Default is 1 second. " + + "Note that this time is used to configure the HashedWheelTimer's tick time for the " + + "InMemoryDelayedDeliveryTrackerFactory.") private long delayedDeliveryTickTimeMillis = 1000; + @FieldContext(category = CATEGORY_SERVER, doc = "When using the InMemoryDelayedDeliveryTrackerFactory (the default " + + "DelayedDeliverTrackerFactory), whether the deliverAt time is strictly followed. When false (default), " + + "messages may be sent to consumers before the deliverAt time by as much as the tickTimeMillis. This can " + + "reduce the overhead on the broker of maintaining the delayed index for a potentially very short time " + + "period. 
When true, messages will not be sent to consumer until the deliverAt time has passed, and they " + + "may be as late as the deliverAt time plus the tickTimeMillis for the topic plus the " + + "delayedDeliveryTickTimeMillis.") + private boolean isDelayedDeliveryDeliverAtTimeStrict = false; + + @FieldContext(category = CATEGORY_SERVER, doc = "Size of the lookahead window to use " + + "when detecting if all the messages in the topic have a fixed delay. " + + "Default is 50,000. Setting the lookahead window to 0 will disable the " + + "logic to handle fixed delays in messages in a different way.") + private long delayedDeliveryFixedDelayDetectionLookahead = 50_000; + @FieldContext(category = CATEGORY_SERVER, doc = "Whether to enable the acknowledge of batch local index") private boolean acknowledgmentAtBatchIndexLevelEnabled = false; @@ -572,6 +610,22 @@ public class ServiceConfiguration implements PulsarConfiguration { ) private int brokerMaxConnectionsPerIp = 0; + @FieldContext( + category = CATEGORY_POLICIES, + dynamic = true, + doc = "Allow schema to be auto updated at broker level. User can override this by 'is_allow_auto_update_schema'" + + " of namespace policy. This is enabled by default." + ) + private boolean isAllowAutoUpdateSchemaEnabled = true; + + @FieldContext( + category = CATEGORY_SERVER, + doc = "Whether to enable the automatic shrink of pendingAcks map, " + + "the default is false, which means it is not enabled. 
" + + "When there are a large number of share or key share consumers in the cluster, " + + "it can be enabled to reduce the memory consumption caused by pendingAcks.") + private boolean autoShrinkForConsumerPendingAcksMap = false; + @FieldContext( category = CATEGORY_SERVER, dynamic = true, @@ -779,6 +833,27 @@ public class ServiceConfiguration implements PulsarConfiguration { ) private int dispatcherMinReadBatchSize = 1; + @FieldContext( + dynamic = true, + category = CATEGORY_SERVER, + doc = "The read failure backoff initial time in milliseconds. By default it is 15s." + ) + private int dispatcherReadFailureBackoffInitialTimeInMs = 15000; + + @FieldContext( + dynamic = true, + category = CATEGORY_SERVER, + doc = "The read failure backoff max time in milliseconds. By default it is 60s." + ) + private int dispatcherReadFailureBackoffMaxTimeInMs = 60000; + + @FieldContext( + dynamic = true, + category = CATEGORY_SERVER, + doc = "The read failure backoff mandatory stop time in milliseconds. By default it is 0s." 
+ ) + private int dispatcherReadFailureBackoffMandatoryStopTimeInMs = 0; + @FieldContext( dynamic = true, category = CATEGORY_SERVER, @@ -1014,6 +1089,13 @@ public class ServiceConfiguration implements PulsarConfiguration { doc = "Enable or disable system topic.") private boolean systemTopicEnabled = false; + @FieldContext( + category = CATEGORY_SCHEMA, + doc = "The schema compatibility strategy to use for system topics" + ) + private SchemaCompatibilityStrategy systemTopicSchemaCompatibilityStrategy = + SchemaCompatibilityStrategy.ALWAYS_COMPATIBLE; + @FieldContext( category = CATEGORY_SERVER, doc = "Enable or disable topic level policies, topic level policies depends on the system topic, " + @@ -1641,6 +1723,11 @@ public class ServiceConfiguration implements PulsarConfiguration { + "If value is invalid or NONE, then save the ManagedLedgerInfo bytes data directly.") private String managedLedgerInfoCompressionType = "NONE"; + @FieldContext(category = CATEGORY_STORAGE_ML, + doc = "ManagedCursorInfo compression type, option values (NONE, LZ4, ZLIB, ZSTD, SNAPPY). \n" + + "If value is NONE, then save the ManagedCursorInfo bytes data directly.") + private String managedCursorInfoCompressionType = "NONE"; + /*** --- Load balancer --- ****/ @FieldContext( category = CATEGORY_LOAD_BALANCER, @@ -1672,6 +1759,12 @@ public class ServiceConfiguration implements PulsarConfiguration { category = CATEGORY_LOAD_BALANCER, doc = "maximum interval to update load report" ) + private int loadBalancerReportUpdateMinIntervalMillis = 5000; + @FieldContext( + category = CATEGORY_LOAD_BALANCER, + dynamic = true, + doc = "Min delay of load report to collect, in milli-seconds" + ) private int loadBalancerReportUpdateMaxIntervalMinutes = 15; @FieldContext( category = CATEGORY_LOAD_BALANCER, @@ -1975,10 +2068,9 @@ public class ServiceConfiguration implements PulsarConfiguration { @FieldContext( category = CATEGORY_SCHEMA, - doc = "The schema compatibility strategy in broker level. 
If this config in namespace policy is `UNDEFINED`" - + ", schema compatibility strategy check will use it in broker level." + doc = "The schema compatibility strategy in broker level" ) - private SchemaCompatibilityStrategy schemaCompatibilityStrategy = SchemaCompatibilityStrategy.UNDEFINED; + private SchemaCompatibilityStrategy schemaCompatibilityStrategy = SchemaCompatibilityStrategy.FULL; /**** --- WebSocket --- ****/ @FieldContext( @@ -2180,7 +2272,29 @@ public class ServiceConfiguration implements PulsarConfiguration { ) private int transactionBufferSnapshotMinTimeInMillis = 5000; - /**** --- KeyStore TLS config variables --- ****/ + + @FieldContext( + category = CATEGORY_TRANSACTION, + doc = "The max concurrent requests for transaction buffer client." + ) + private int transactionBufferClientMaxConcurrentRequests = 1000; + + @FieldContext( + category = CATEGORY_TRANSACTION, + doc = "The transaction buffer client's operation timeout in milliseconds." + ) + private long transactionBufferClientOperationTimeoutInMills = 3000L; + + @FieldContext( + category = CATEGORY_TRANSACTION, + doc = "MLPendingAckStore maintain a ConcurrentSkipListMap pendingAckLogIndex`," + + "it store the position in pendingAckStore as value and save a position used to determine" + + "whether the previous data can be cleaned up as a key." + + "transactionPendingAckLogIndexMinLag is used to configure the minimum lag between indexes" + ) + private long transactionPendingAckLogIndexMinLag = 500L; + + /**** --- KeyStore TLS config variables. 
--- ****/ @FieldContext( category = CATEGORY_KEYSTORE_TLS, doc = "Enable TLS with KeyStore type configuration in broker" @@ -2189,7 +2303,9 @@ public class ServiceConfiguration implements PulsarConfiguration { @FieldContext( category = CATEGORY_KEYSTORE_TLS, - doc = "TLS Provider for KeyStore type" + doc = "Specify the TLS provider for the broker service: \n" + + "When using TLS authentication with CACert, the valid value is either OPENSSL or JDK.\n" + + "When using TLS authentication with KeyStore, available values can be SunJSSE, Conscrypt and etc." ) private String tlsProvider = null; @@ -2400,4 +2516,10 @@ public int getBrokerDeleteInactiveTopicsMaxInactiveDurationSeconds() { } } + public SchemaCompatibilityStrategy getSchemaCompatibilityStrategy() { + if (SchemaCompatibilityStrategy.isUndefined(schemaCompatibilityStrategy)) { + return SchemaCompatibilityStrategy.FULL; + } + return schemaCompatibilityStrategy; + } } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfigurationUtils.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfigurationUtils.java index 8401723ed7a9f..7507181f34bdd 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfigurationUtils.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfigurationUtils.java @@ -83,12 +83,22 @@ public static String getAppliedAdvertisedAddress(ServiceConfiguration configurat /** * Gets the internal advertised listener for broker-to-broker communication. 
- * @return an advertised listener + * @return a non-null advertised listener */ - public static AdvertisedListener getInternalListener(ServiceConfiguration config) { + public static AdvertisedListener getInternalListener(ServiceConfiguration config, String protocol) { Map result = MultipleListenerValidator .validateAndAnalysisAdvertisedListener(config); AdvertisedListener internal = result.get(config.getInternalListenerName()); + if (internal == null || !internal.hasUriForProtocol(protocol)) { + // Search for an advertised listener for same protocol + for (AdvertisedListener l : result.values()) { + if (l.hasUriForProtocol(protocol)) { + internal = l; + break; + } + } + } + if (internal == null) { // synthesize an advertised listener based on legacy configuration properties String host = ServiceConfigurationUtils.getDefaultOrConfiguredAddress(config.getAdvertisedAddress()); diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataCommand.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataCommand.java index efc332968afb2..31bc670a2de1e 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataCommand.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataCommand.java @@ -30,22 +30,9 @@ public class AuthenticationDataCommand implements AuthenticationDataSource { protected final String authData; protected final SocketAddress remoteAddress; protected final SSLSession sslSession; - protected String subscription; public AuthenticationDataCommand(String authData) { - this(authData, null, null, null); - } - - public AuthenticationDataCommand(String authData, String subscription) { - this(authData, null, null, subscription); - } - - public AuthenticationDataCommand(String authData, SocketAddress remoteAddress, SSLSession sslSession, - String subscription) { - this.authData = 
authData; - this.remoteAddress = remoteAddress; - this.sslSession = sslSession; - this.subscription = subscription; + this(authData, null, null); } public AuthenticationDataCommand(String authData, SocketAddress remoteAddress, SSLSession sslSession) { @@ -100,22 +87,4 @@ public Certificate[] getTlsCertificates() { return null; } } - - /* - * Subscription - */ - @Override - public boolean hasSubscription() { - return this.subscription != null; - } - - @Override - public void setSubscription(String subscription) { - this.subscription = subscription; - } - - @Override - public String getSubscription() { - return subscription; - } } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttp.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttp.java index 958e5eab9c462..8a8dda2f17715 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttp.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttp.java @@ -35,7 +35,7 @@ public AuthenticationDataHttp(HttpServletRequest request) { throw new IllegalArgumentException(); } this.request = request; - this.remoteAddress = new InetSocketAddress(request.getRemoteAddr(), request.getRemotePort()); + this.remoteAddress = InetSocketAddress.createUnresolved(request.getRemoteAddr(), request.getRemotePort()); } /* diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttps.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttps.java index 4e1d33b5ec5e7..0c262f5c62056 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttps.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttps.java @@ -19,7 +19,6 @@ package 
org.apache.pulsar.broker.authentication; import java.security.cert.X509Certificate; - import javax.servlet.http.HttpServletRequest; public class AuthenticationDataHttps extends AuthenticationDataHttp { @@ -27,7 +26,7 @@ public class AuthenticationDataHttps extends AuthenticationDataHttp { protected final X509Certificate[] certificates; public AuthenticationDataHttps(HttpServletRequest request) { - super(request); + super(new AuthenticationProviderToken.HttpServletRequestWrapper(request)); certificates = (X509Certificate[]) request.getAttribute("javax.servlet.request.X509Certificate"); } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataSubscription.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataSubscription.java new file mode 100644 index 0000000000000..f6723609908ac --- /dev/null +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataSubscription.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.authentication; + +import java.net.SocketAddress; +import java.security.cert.Certificate; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class AuthenticationDataSubscription implements AuthenticationDataSource { + private final AuthenticationDataSource authData; + private final String subscription; + + public AuthenticationDataSubscription(AuthenticationDataSource authData, String subscription) { + this.authData = authData; + this.subscription = subscription; + } + + @Override + public boolean hasDataFromCommand() { + return authData.hasDataFromCommand(); + } + + @Override + public String getCommandData() { + return authData.getCommandData(); + } + + @Override + public boolean hasDataFromPeer() { + return authData.hasDataFromPeer(); + } + + @Override + public SocketAddress getPeerAddress() { + return authData.getPeerAddress(); + } + + @Override + public boolean hasDataFromTls() { + return authData.hasDataFromTls(); + } + + @Override + public Certificate[] getTlsCertificates() { + return authData.getTlsCertificates(); + } + + @Override + public boolean hasSubscription() { + return this.subscription != null; + } + + @Override + public String getSubscription() { + return subscription; + } +} diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderBasic.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderBasic.java index 46c1e3a36de6d..9f6bacf729891 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderBasic.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderBasic.java @@ -19,28 +19,32 @@ package org.apache.pulsar.broker.authentication; +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.URISyntaxException; 
+import java.nio.file.Files; +import java.nio.file.Paths; import java.util.Arrays; import java.util.Base64; import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.naming.AuthenticationException; +import lombok.Cleanup; import org.apache.commons.codec.digest.Crypt; import org.apache.commons.codec.digest.Md5Crypt; +import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.broker.ServiceConfiguration; - -import lombok.Cleanup; import org.apache.pulsar.broker.authentication.metrics.AuthenticationMetrics; - -import javax.naming.AuthenticationException; -import java.io.BufferedReader; -import java.io.File; -import java.io.FileReader; -import java.io.IOException; +import org.apache.pulsar.client.api.url.URL; public class AuthenticationProviderBasic implements AuthenticationProvider { private static final String HTTP_HEADER_NAME = "Authorization"; private static final String CONF_SYSTEM_PROPERTY_KEY = "pulsar.auth.basic.conf"; + private static final String CONF_PULSAR_PROPERTY_KEY = "basicAuthConf"; private Map users; @Override @@ -48,16 +52,38 @@ public void close() throws IOException { // noop } + public static byte[] readData(String data) + throws IOException, URISyntaxException, InstantiationException, IllegalAccessException { + if (data.startsWith("data:") || data.startsWith("file:")) { + return IOUtils.toByteArray(URL.createURL(data)); + } else if (Files.exists(Paths.get(data))) { + return Files.readAllBytes(Paths.get(data)); + } else if (org.apache.commons.codec.binary.Base64.isBase64(data)) { + return Base64.getDecoder().decode(data); + } else { + String msg = "Not supported config"; + throw new IllegalArgumentException(msg); + } + } + @Override public void initialize(ServiceConfiguration config) throws IOException { - File confFile = new File(System.getProperty(CONF_SYSTEM_PROPERTY_KEY)); - if (!confFile.exists()) { - throw new IOException("The password auth conf file does not exist"); 
- } else if (!confFile.isFile()) { - throw new IOException("The path is not a file"); + String data = config.getProperties().getProperty(CONF_PULSAR_PROPERTY_KEY); + if (StringUtils.isEmpty(data)) { + data = System.getProperty(CONF_SYSTEM_PROPERTY_KEY); + } + if (StringUtils.isEmpty(data)) { + throw new IOException("No basic authentication config provided"); + } + + @Cleanup BufferedReader reader = null; + try { + byte[] bytes = readData(data); + reader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(bytes))); + } catch (Exception e) { + throw new IllegalArgumentException(e); } - @Cleanup BufferedReader reader = new BufferedReader(new FileReader(confFile)); users = new HashMap<>(); for (String line : reader.lines().toArray(s -> new String[s])) { List splitLine = Arrays.asList(line.split(":")); @@ -99,7 +125,8 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat throw new AuthenticationException(msg); } } catch (AuthenticationException exception) { - AuthenticationMetrics.authenticateFailure(getClass().getSimpleName(), getAuthMethodName(), exception.getMessage()); + AuthenticationMetrics.authenticateFailure(getClass().getSimpleName(), getAuthMethodName(), + exception.getMessage()); throw exception; } AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName()); diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderList.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderList.java index a79fabef3deb2..9ec1c2eb706cc 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderList.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderList.java @@ -206,7 +206,7 @@ public boolean authenticateHttpRequest(HttpServletRequest request, HttpServletRe } } ); - return 
authenticated.booleanValue(); + return authenticated; } @Override diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderToken.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderToken.java index 21bda4c97bf46..dccd7bbb2b724 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderToken.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderToken.java @@ -19,35 +19,32 @@ package org.apache.pulsar.broker.authentication; import static java.nio.charset.StandardCharsets.UTF_8; - +import com.google.common.annotations.VisibleForTesting; +import io.jsonwebtoken.Claims; +import io.jsonwebtoken.ExpiredJwtException; +import io.jsonwebtoken.Jwt; +import io.jsonwebtoken.JwtException; +import io.jsonwebtoken.JwtParser; +import io.jsonwebtoken.Jwts; +import io.jsonwebtoken.RequiredTypeException; +import io.jsonwebtoken.SignatureAlgorithm; +import io.jsonwebtoken.security.SignatureException; +import io.prometheus.client.Counter; +import io.prometheus.client.Histogram; import java.io.IOException; import java.net.SocketAddress; import java.security.Key; - import java.util.Date; import java.util.List; import javax.naming.AuthenticationException; import javax.net.ssl.SSLSession; - -import com.google.common.annotations.VisibleForTesting; -import io.jsonwebtoken.ExpiredJwtException; -import io.jsonwebtoken.RequiredTypeException; -import io.jsonwebtoken.JwtParser; -import io.prometheus.client.Counter; -import io.prometheus.client.Histogram; +import javax.servlet.http.HttpServletRequest; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.metrics.AuthenticationMetrics; import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; import org.apache.pulsar.common.api.AuthData; 
-import io.jsonwebtoken.Claims; -import io.jsonwebtoken.Jwt; -import io.jsonwebtoken.JwtException; -import io.jsonwebtoken.Jwts; -import io.jsonwebtoken.SignatureAlgorithm; -import io.jsonwebtoken.security.SignatureException; - public class AuthenticationProviderToken implements AuthenticationProvider { static final String HTTP_HEADER_NAME = "Authorization"; @@ -368,4 +365,25 @@ public boolean isExpired() { return expiration < System.currentTimeMillis(); } } + public static final class HttpServletRequestWrapper extends javax.servlet.http.HttpServletRequestWrapper { + private final HttpServletRequest request; + + public HttpServletRequestWrapper(HttpServletRequest request) { + super(request); + this.request = request; + } + + @Override + public String getHeader(String name) { + // The browser javascript WebSocket client couldn't add the auth param to the request header, use the + // query param `token` to transport the auth token for the browser javascript WebSocket client. + if (name.equals(HTTP_HEADER_NAME) && request.getHeader(HTTP_HEADER_NAME) == null) { + String token = request.getParameter(TOKEN); + if (token != null) { + return !token.startsWith(HTTP_HEADER_VALUE_PREFIX) ? 
HTTP_HEADER_VALUE_PREFIX + token : token; + } + } + return super.getHeader(name); + } + } } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/AuthorizationService.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/AuthorizationService.java index 6943e95999bba..ae36a9bba6ece 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/AuthorizationService.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/AuthorizationService.java @@ -43,6 +43,7 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; import static java.util.concurrent.TimeUnit.SECONDS; @@ -396,11 +397,15 @@ public boolean allowTenantOperation(String tenantName, AuthenticationDataSource authData) { try { return allowTenantOperationAsync( - tenantName, operation, originalRole, role, authData).get(); + tenantName, operation, originalRole, role, authData).get( + conf.getZooKeeperOperationTimeoutSeconds(), SECONDS); } catch (InterruptedException e) { + Thread.currentThread().interrupt(); throw new RestException(e); } catch (ExecutionException e) { throw new RestException(e.getCause()); + } catch (TimeoutException e) { + throw new RestException(e); } } @@ -521,11 +526,15 @@ public boolean allowNamespacePolicyOperation(NamespaceName namespaceName, AuthenticationDataSource authData) { try { return allowNamespacePolicyOperationAsync( - namespaceName, policy, operation, originalRole, role, authData).get(); + namespaceName, policy, operation, originalRole, role, authData).get( + conf.getZooKeeperOperationTimeoutSeconds(), SECONDS); } catch (InterruptedException e) { + Thread.currentThread().interrupt(); throw new RestException(e); } catch (ExecutionException e) { throw new RestException(e.getCause()); + } catch (TimeoutException e) { + throw new RestException(e); } } @@ 
-585,11 +594,15 @@ public Boolean allowTopicPolicyOperation(TopicName topicName, AuthenticationDataSource authData) { try { return allowTopicPolicyOperationAsync( - topicName, policy, operation, originalRole, role, authData).get(); + topicName, policy, operation, originalRole, role, authData).get( + conf.getZooKeeperOperationTimeoutSeconds(), SECONDS); } catch (InterruptedException e) { + Thread.currentThread().interrupt(); throw new RestException(e); } catch (ExecutionException e) { throw new RestException(e.getCause()); + } catch (TimeoutException e) { + throw new RestException(e); } } @@ -667,9 +680,10 @@ public Boolean allowTopicOperation(TopicName topicName, TopicOperation operation, String originalRole, String role, - AuthenticationDataSource authData) { + AuthenticationDataSource authData) throws Exception { try { - return allowTopicOperationAsync(topicName, operation, originalRole, role, authData).get(); + return allowTopicOperationAsync(topicName, operation, originalRole, role, authData).get( + conf.getZooKeeperOperationTimeoutSeconds(), SECONDS); } catch (InterruptedException e) { throw new RestException(e); } catch (ExecutionException e) { diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProvider.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProvider.java index dcdf779780ef6..d72c951c8894c 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProvider.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProvider.java @@ -23,30 +23,33 @@ import io.jsonwebtoken.JwtParser; import io.jsonwebtoken.Jwts; import io.jsonwebtoken.RequiredTypeException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; 
+import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import javax.ws.rs.core.Response; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; -import org.apache.pulsar.broker.cache.ConfigurationCacheService; import org.apache.pulsar.broker.resources.PulsarResources; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.NamespaceOperation; import org.apache.pulsar.common.policies.data.PolicyName; import org.apache.pulsar.common.policies.data.PolicyOperation; +import org.apache.pulsar.common.policies.data.TenantInfo; import org.apache.pulsar.common.policies.data.TenantOperation; import org.apache.pulsar.common.policies.data.TopicOperation; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.RestException; +import org.apache.pulsar.metadata.api.MetadataStoreException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.function.Function; - public class MultiRolesTokenAuthorizationProvider extends PulsarAuthorizationProvider { private static final Logger log = LoggerFactory.getLogger(MultiRolesTokenAuthorizationProvider.class); @@ -59,7 +62,7 @@ public class MultiRolesTokenAuthorizationProvider extends PulsarAuthorizationPro // The token's claim that corresponds to the "role" string static final String CONF_TOKEN_AUTH_CLAIM = "tokenAuthClaim"; - private JwtParser parser; + private final JwtParser parser; private String roleClaim; public MultiRolesTokenAuthorizationProvider() { @@ -82,75 +85,118 @@ public void initialize(ServiceConfiguration conf, PulsarResources 
pulsarResource super.initialize(conf, pulsarResources); } - private List getRoles(AuthenticationDataSource authData) { + @Override + public CompletableFuture isSuperUser(String role, AuthenticationDataSource authenticationData, + ServiceConfiguration serviceConfiguration) { + Set roles = getRoles(authenticationData); + if (roles.isEmpty()) { + return CompletableFuture.completedFuture(false); + } + Set superUserRoles = serviceConfiguration.getSuperUserRoles(); + if (superUserRoles.isEmpty()) { + return CompletableFuture.completedFuture(false); + } + + return CompletableFuture.completedFuture(roles.stream().anyMatch(superUserRoles::contains)); + } + + @Override + public CompletableFuture validateTenantAdminAccess(String tenantName, String role, + AuthenticationDataSource authData) { + return isSuperUser(role, authData, conf) + .thenCompose(isSuperUser -> { + if (isSuperUser) { + return CompletableFuture.completedFuture(true); + } + Set roles = getRoles(authData); + if (roles.isEmpty()) { + return CompletableFuture.completedFuture(false); + } + + return pulsarResources.getTenantResources() + .getTenantAsync(tenantName) + .thenCompose(op -> { + if (op.isPresent()) { + TenantInfo tenantInfo = op.get(); + if (tenantInfo.getAdminRoles() == null || tenantInfo.getAdminRoles().isEmpty()) { + return CompletableFuture.completedFuture(false); + } + + return CompletableFuture.completedFuture(roles.stream() + .anyMatch(n -> tenantInfo.getAdminRoles().contains(n))); + } else { + throw new RestException(Response.Status.NOT_FOUND, "Tenant does not exist"); + } + }).exceptionally(ex -> { + Throwable cause = ex.getCause(); + if (cause instanceof MetadataStoreException.NotFoundException) { + log.warn("Failed to get tenant info data for non existing tenant {}", tenantName); + throw new RestException(Response.Status.NOT_FOUND, "Tenant does not exist"); + } + log.error("Failed to get tenant {}", tenantName, cause); + throw new RestException(cause); + }); + }); + } + + private Set 
getRoles(AuthenticationDataSource authData) { String token = null; if (authData.hasDataFromCommand()) { // Authenticate Pulsar binary connection token = authData.getCommandData(); if (StringUtils.isBlank(token)) { - return Collections.emptyList(); + return Collections.emptySet(); } } else if (authData.hasDataFromHttp()) { // The format here should be compliant to RFC-6750 // (https://tools.ietf.org/html/rfc6750#section-2.1). Eg: Authorization: Bearer xxxxxxxxxxxxx String httpHeaderValue = authData.getHttpHeader(HTTP_HEADER_NAME); if (httpHeaderValue == null || !httpHeaderValue.startsWith(HTTP_HEADER_VALUE_PREFIX)) { - return Collections.emptyList(); + return Collections.emptySet(); } // Remove prefix token = httpHeaderValue.substring(HTTP_HEADER_VALUE_PREFIX.length()); } - if (token == null) - return Collections.emptyList(); + if (token == null) { + return Collections.emptySet(); + } String[] splitToken = token.split("\\."); + if (splitToken.length < 2) { + log.warn("Unable to extract additional roles from JWT token"); + return Collections.emptySet(); + } String unsignedToken = splitToken[0] + "." 
+ splitToken[1] + "."; Jwt jwt = parser.parseClaimsJwt(unsignedToken); try { - Collections.singletonList(jwt.getBody().get(roleClaim, String.class)); + return new HashSet<>(Collections.singletonList(jwt.getBody().get(roleClaim, String.class))); } catch (RequiredTypeException requiredTypeException) { try { List list = jwt.getBody().get(roleClaim, List.class); if (list != null) { - return list; + return new HashSet(list); } } catch (RequiredTypeException requiredTypeException1) { - return Collections.emptyList(); + return Collections.emptySet(); } } - return Collections.emptyList(); + return Collections.emptySet(); } - public CompletableFuture authorize(AuthenticationDataSource authenticationData, Function> authorizeFunc) { - List roles = getRoles(authenticationData); + public CompletableFuture authorize(AuthenticationDataSource authenticationData, Function> authorizeFunc) { + Set roles = getRoles(authenticationData); + if (roles.isEmpty()) { + return CompletableFuture.completedFuture(false); + } List> futures = new ArrayList<>(roles.size()); roles.forEach(r -> futures.add(authorizeFunc.apply(r))); - return CompletableFuture.supplyAsync(() -> { - do { - try { - List> doneFutures = new ArrayList<>(); - FutureUtil.waitForAny(futures).get(); - for (CompletableFuture future : futures) { - if (!future.isDone()) continue; - doneFutures.add(future); - if (future.get()) { - futures.forEach(f -> { - if (!f.isDone()) f.cancel(false); - }); - return true; - } - } - futures.removeAll(doneFutures); - } catch (InterruptedException | ExecutionException ignored) { - } - } while (!futures.isEmpty()); - return false; - }); + return FutureUtil.waitForAny(futures, ret -> (boolean) ret).thenApply(v -> v.isPresent()); } /** diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/PulsarAuthorizationProvider.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/PulsarAuthorizationProvider.java index e355b122fc013..b753d2ed6346c 
100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/PulsarAuthorizationProvider.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authorization/PulsarAuthorizationProvider.java @@ -59,7 +59,8 @@ public class PulsarAuthorizationProvider implements AuthorizationProvider { private static final Logger log = LoggerFactory.getLogger(PulsarAuthorizationProvider.class); public ServiceConfiguration conf; - private PulsarResources pulsarResources; + + protected PulsarResources pulsarResources; public PulsarAuthorizationProvider() { @@ -286,6 +287,7 @@ public CompletableFuture grantPermissionAsync(NamespaceName namespaceName, validatePoliciesReadOnlyAccess(); } catch (Exception e) { result.completeExceptionally(e); + return result; } try { @@ -364,20 +366,23 @@ private CompletableFuture updateSubscriptionPermissionAsync(NamespaceName } private CompletableFuture checkAuthorization(TopicName topicName, String role, AuthAction action) { - return checkPermission(topicName, role, action) - .thenApply(isPermission -> isPermission && checkCluster(topicName)); + return checkPermission(topicName, role, action). + thenApply(isPermission -> isPermission). + thenCompose(permission -> permission ? 
checkCluster(topicName) : + CompletableFuture.completedFuture(false)); } - private boolean checkCluster(TopicName topicName) { + private CompletableFuture checkCluster(TopicName topicName) { if (topicName.isGlobal() || conf.getClusterName().equals(topicName.getCluster())) { - return true; - } else { - if (log.isDebugEnabled()) { - log.debug("Topic [{}] does not belong to local cluster [{}]", topicName.toString(), - conf.getClusterName()); - } - return false; + return CompletableFuture.completedFuture(true); + } + if (log.isDebugEnabled()) { + log.debug("Topic [{}] does not belong to local cluster [{}]", topicName.toString(), conf.getClusterName()); } + return pulsarResources.getClusterResources().listAsync() + .thenApply(clusters -> { + return clusters.contains(topicName.getCluster()); + }); } public CompletableFuture checkPermission(TopicName topicName, String role, AuthAction action) { @@ -520,22 +525,78 @@ public CompletableFuture allowNamespaceOperationAsync(NamespaceName nam String role, NamespaceOperation operation, AuthenticationDataSource authData) { - CompletableFuture isAuthorizedFuture; - switch (operation) { - case PACKAGES: - isAuthorizedFuture = allowTheSpecifiedActionOpsAsync(namespaceName, role, authData, AuthAction.packages); - break; - default: - isAuthorizedFuture = CompletableFuture.completedFuture(false); + if (log.isDebugEnabled()) { + log.debug("Check allowNamespaceOperationAsync [{}] on [{}].", operation.name(), namespaceName); } - CompletableFuture isTenantAdminFuture = validateTenantAdminAccess(namespaceName.getTenant(), role, authData); - return isTenantAdminFuture.thenCombine(isAuthorizedFuture, (isTenantAdmin, isAuthorized) -> { - if (log.isDebugEnabled()) { - log.debug("Verify if role {} is allowed to {} to topic {}: isTenantAdmin={}, isAuthorized={}", - role, operation, namespaceName, isTenantAdmin, isAuthorized); - } - return isTenantAdmin || isAuthorized; - }); + return validateTenantAdminAccess(namespaceName.getTenant(), role, 
authData) + .thenCompose(isSuperUserOrAdmin -> { + if (log.isDebugEnabled()) { + log.debug("Verify if role {} is allowed to {} to namespace {}: isSuperUserOrAdmin={}", + role, operation, namespaceName, isSuperUserOrAdmin); + } + if (isSuperUserOrAdmin) { + return CompletableFuture.completedFuture(true); + } else { + switch (operation) { + case PACKAGES: + return allowTheSpecifiedActionOpsAsync( + namespaceName, role, authData, AuthAction.packages); + case GET_TOPIC: + case GET_TOPICS: + case GET_BUNDLE: + return allowConsumeOrProduceOpsAsync(namespaceName, role, authData); + case UNSUBSCRIBE: + case CLEAR_BACKLOG: + return allowTheSpecifiedActionOpsAsync( + namespaceName, role, authData, AuthAction.consume); + case CREATE_TOPIC: + case DELETE_TOPIC: + case ADD_BUNDLE: + case DELETE_BUNDLE: + case GRANT_PERMISSION: + case GET_PERMISSION: + case REVOKE_PERMISSION: + return CompletableFuture.completedFuture(false); + default: + return FutureUtil.failedFuture(new IllegalStateException( + "NamespaceOperation [" + operation.name() + "] is not supported.")); + } + } + }); + } + + private CompletableFuture allowConsumeOrProduceOpsAsync(NamespaceName namespaceName, + String role, + AuthenticationDataSource authenticationData) { + CompletableFuture finalResult = new CompletableFuture<>(); + allowTheSpecifiedActionOpsAsync(namespaceName, role, authenticationData, AuthAction.consume) + .whenComplete((consumeAuthorized, e) -> { + if (e == null) { + if (consumeAuthorized) { + finalResult.complete(consumeAuthorized); + return; + } + } else { + if (log.isDebugEnabled()) { + log.debug("Namespace [{}] Role [{}] exception occurred while trying to check Consume " + + "permission. 
{}", namespaceName, role, e.getCause()); + } + } + allowTheSpecifiedActionOpsAsync(namespaceName, role, authenticationData, AuthAction.produce) + .whenComplete((produceAuthorized, ex) -> { + if (ex == null) { + finalResult.complete(produceAuthorized); + } else { + if (log.isDebugEnabled()) { + log.debug("Namespace [{}] Role [{}] exception occurred while trying to check " + + "Produce permission. {}", namespaceName, role, ex.getCause()); + } + finalResult.completeExceptionally(ex.getCause()); + } + }); + }); + + return finalResult; } @Override @@ -552,40 +613,8 @@ public CompletableFuture allowTopicOperationAsync(TopicName topicName, String role, TopicOperation operation, AuthenticationDataSource authData) { - log.debug("Check allowTopicOperationAsync [" + operation.name() + "] on [" + topicName.toString() + "]."); - - CompletableFuture isAuthorizedFuture; - - switch (operation) { - case LOOKUP: - case GET_STATS: - isAuthorizedFuture = canLookupAsync(topicName, role, authData); - break; - case PRODUCE: - isAuthorizedFuture = canProduceAsync(topicName, role, authData); - break; - case GET_SUBSCRIPTIONS: - case CONSUME: - case SUBSCRIBE: - case UNSUBSCRIBE: - case SKIP: - case EXPIRE_MESSAGES: - case PEEK_MESSAGES: - case RESET_CURSOR: - case SET_REPLICATED_SUBSCRIPTION_STATUS: - isAuthorizedFuture = canConsumeAsync(topicName, role, authData, authData.getSubscription()); - break; - case TERMINATE: - case COMPACT: - case OFFLOAD: - case UNLOAD: - case ADD_BUNDLE_RANGE: - case GET_BUNDLE_RANGE: - case DELETE_BUNDLE_RANGE: - return validateTenantAdminAccess(topicName.getTenant(), role, authData); - default: - return FutureUtil.failedFuture( - new IllegalStateException("TopicOperation [" + operation.name() + "] is not supported.")); + if (log.isDebugEnabled()) { + log.debug("Check allowTopicOperationAsync [{}] on [{}].", operation.name(), topicName); } return validateTenantAdminAccess(topicName.getTenant(), role, authData) @@ -597,7 +626,36 @@ public CompletableFuture 
allowTopicOperationAsync(TopicName topicName, if (isSuperUserOrAdmin) { return CompletableFuture.completedFuture(true); } else { - return isAuthorizedFuture; + switch (operation) { + case LOOKUP: + case GET_STATS: + case GET_METADATA: + return canLookupAsync(topicName, role, authData); + case PRODUCE: + return canProduceAsync(topicName, role, authData); + case GET_SUBSCRIPTIONS: + case CONSUME: + case SUBSCRIBE: + case UNSUBSCRIBE: + case SKIP: + case EXPIRE_MESSAGES: + case PEEK_MESSAGES: + case RESET_CURSOR: + case GET_BACKLOG_SIZE: + case SET_REPLICATED_SUBSCRIPTION_STATUS: + return canConsumeAsync(topicName, role, authData, authData.getSubscription()); + case TERMINATE: + case COMPACT: + case OFFLOAD: + case UNLOAD: + case ADD_BUNDLE_RANGE: + case GET_BUNDLE_RANGE: + case DELETE_BUNDLE_RANGE: + return CompletableFuture.completedFuture(false); + default: + return FutureUtil.failedFuture(new IllegalStateException( + "TopicOperation [" + operation.name() + "] is not supported.")); + } } }); } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/BaseResources.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/BaseResources.java index 9061dd76b47fd..a6972cd01f4eb 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/BaseResources.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/BaseResources.java @@ -31,6 +31,7 @@ import java.util.function.Function; import lombok.Getter; import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.metadata.api.MetadataCache; import org.apache.pulsar.metadata.api.MetadataStore; import org.apache.pulsar.metadata.api.MetadataStoreException; @@ -167,6 +168,10 @@ protected boolean exists(String path) throws MetadataStoreException { } } + protected CompletableFuture existsAsync(String path) { + return cache.exists(path); + } + public int getOperationTimeoutSec() { return 
operationTimeoutSec; } @@ -177,37 +182,70 @@ protected static String joinPath(String... parts) { return sb.toString(); } + protected static CompletableFuture deleteRecursiveAsync(BaseResources resources, final String pathRoot) { + PathUtils.validatePath(pathRoot); + CompletableFuture completableFuture = new CompletableFuture<>(); + listSubTreeBFSAsync(resources, pathRoot).whenComplete((tree, ex) -> { + if (ex == null) { + log.debug("Deleting {} with size {}", tree, tree.size()); + + final List> futures = new ArrayList<>(); + for (int i = tree.size() - 1; i >= 0; --i) { + // Delete the leaves first and eventually get rid of the root + futures.add(resources.deleteAsync(tree.get(i))); + } + + FutureUtil.waitForAll(futures).handle((result, exception) -> { + if (exception != null) { + log.error("Failed to remove partitioned topics", exception); + return completableFuture.completeExceptionally(exception.getCause()); + } + return completableFuture.complete(null); + }); + } else { + log.warn("Failed to delete partitioned topics z-node [{}]", pathRoot, ex.getCause()); + } + }); - protected static void deleteRecursive(BaseResources resources, final String pathRoot) throws MetadataStoreException { - PathUtils.validatePath(pathRoot); - List tree = listSubTreeBFS(resources, pathRoot); - log.debug("Deleting {} with size {}", tree, tree.size()); - log.debug("Deleting " + tree.size() + " subnodes "); - for (int i = tree.size() - 1; i >= 0; --i) { - // Delete the leaves first and eventually get rid of the root - resources.delete(tree.get(i)); - } + return completableFuture; } - protected static List listSubTreeBFS(BaseResources resources, final String pathRoot) - throws MetadataStoreException { + protected static CompletableFuture> listSubTreeBFSAsync(BaseResources resources, + final String pathRoot) { Deque queue = new LinkedList<>(); List tree = new ArrayList<>(); queue.add(pathRoot); tree.add(pathRoot); - while (true) { + CompletableFuture> completableFuture = new 
CompletableFuture<>(); + final List> futures = new ArrayList<>(); + for (int i = 0; i < queue.size(); i++) { String node = queue.pollFirst(); if (node == null) { break; } - List children = resources.getChildren(node); - for (final String child : children) { - final String childPath = node + "/" + child; - queue.add(childPath); - tree.add(childPath); - } + futures.add(resources.getChildrenAsync(node) + .whenComplete((children, ex) -> { + if (ex == null) { + for (final String child : (List) children) { + final String childPath = node + "/" + child; + queue.add(childPath); + tree.add(childPath); + } + } else { + log.warn("Failed to get data error from z-node [{}]", node); + } + })); } - return tree; + + FutureUtil.waitForAll(futures).handle((result, exception) -> { + if (exception != null) { + log.error("Failed to get partitioned topics", exception); + return completableFuture.completeExceptionally(exception.getCause()); + } + return completableFuture.complete(tree); + }); + + return completableFuture; } } \ No newline at end of file diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/ClusterResources.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/ClusterResources.java index 1a3cf89b59efe..32586d246926e 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/ClusterResources.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/ClusterResources.java @@ -42,6 +42,10 @@ public ClusterResources(MetadataStore store, int operationTimeoutSec) { this.failureDomainResources = new FailureDomainResources(store, FailureDomainImpl.class, operationTimeoutSec); } + public CompletableFuture> listAsync() { + return getChildrenAsync(BASE_CLUSTERS_PATH).thenApply(list -> new HashSet<>(list)); + } + public Set list() throws MetadataStoreException { return new HashSet<>(super.getChildren(BASE_CLUSTERS_PATH)); } @@ -117,20 +121,20 @@ public Optional getFailureDomain(String 
clusterName, String d public void deleteFailureDomain(String clusterName, String domainName) throws MetadataStoreException { String path = joinPath(BASE_CLUSTERS_PATH, clusterName, FAILURE_DOMAIN, domainName); - if (exists(path)) { - delete(path); - } + delete(path); } public void deleteFailureDomains(String clusterName) throws MetadataStoreException { String failureDomainPath = joinPath(BASE_CLUSTERS_PATH, clusterName, FAILURE_DOMAIN); + if (!exists(failureDomainPath)) { + return; + } + for (String domain : getChildren(failureDomainPath)) { delete(joinPath(failureDomainPath, domain)); } - if (exists(failureDomainPath)) { - delete(failureDomainPath); - } + delete(failureDomainPath); } public void setFailureDomainWithCreate(String clusterName, String domainName, diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/DynamicConfigurationResources.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/DynamicConfigurationResources.java index 8137dd837ef96..d918dc8f2c5d3 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/DynamicConfigurationResources.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/DynamicConfigurationResources.java @@ -40,8 +40,8 @@ public CompletableFuture>> getDynamicConfigurationA return getAsync(BROKER_SERVICE_CONFIGURATION_PATH); } - public Map getDynamicConfiguration() throws MetadataStoreException { - return get(BROKER_SERVICE_CONFIGURATION_PATH).orElse(Collections.emptyMap()); + public Optional> getDynamicConfiguration() throws MetadataStoreException { + return get(BROKER_SERVICE_CONFIGURATION_PATH); } public void setDynamicConfigurationWithCreate( diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/LocalPoliciesResources.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/LocalPoliciesResources.java index 0023e5fc283eb..29a6a4666e2e4 100644 --- 
a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/LocalPoliciesResources.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/LocalPoliciesResources.java @@ -79,6 +79,10 @@ public void deleteLocalPolicies(NamespaceName ns) throws MetadataStoreException delete(joinPath(LOCAL_POLICIES_ROOT, ns.toString())); } + public CompletableFuture deleteLocalPoliciesAsync(NamespaceName ns) { + return deleteAsync(joinPath(LOCAL_POLICIES_ROOT, ns.toString())); + } + public static boolean isLocalPoliciesPath(String path) { return path.startsWith(LOCAL_POLICIES_ROOT); } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java index dcba2ac854c3c..8f677f069d578 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/NamespaceResources.java @@ -18,18 +18,14 @@ */ package org.apache.pulsar.broker.resources; -import static org.apache.pulsar.common.policies.path.PolicyPath.path; import com.fasterxml.jackson.core.type.TypeReference; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; import lombok.Getter; - import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; @@ -39,13 +35,17 @@ import org.apache.pulsar.common.policies.data.Policies; import org.apache.pulsar.common.policies.impl.NamespaceIsolationPolicies; import org.apache.pulsar.common.util.Codec; -import org.apache.pulsar.common.util.FutureUtil; import 
org.apache.pulsar.metadata.api.MetadataCache; import org.apache.pulsar.metadata.api.MetadataStore; import org.apache.pulsar.metadata.api.MetadataStoreException; +import org.apache.zookeeper.KeeperException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Getter public class NamespaceResources extends BaseResources { + private static final Logger log = LoggerFactory.getLogger(NamespaceResources.class); + private final IsolationPolicyResources isolationPolicies; private final PartitionedTopicResources partitionedTopicResources; private final MetadataStore configurationStore; @@ -53,7 +53,8 @@ public class NamespaceResources extends BaseResources { private final MetadataCache localPoliciesCache; private static final String POLICIES_READONLY_FLAG_PATH = "/admin/flags/policies-readonly"; - + private static final String NAMESPACE_BASE_PATH = "/namespace"; + private static final String BUNDLE_DATA_BASE_PATH = "/loadbalance/bundle-data"; public NamespaceResources(MetadataStore localStore, MetadataStore configurationStore, int operationTimeoutSec) { super(configurationStore, Policies.class, operationTimeoutSec); this.configurationStore = configurationStore; @@ -101,6 +102,10 @@ public void deletePolicies(NamespaceName ns) throws MetadataStoreException{ delete(joinPath(BASE_POLICIES_PATH, ns.toString())); } + public CompletableFuture deletePoliciesAsync(NamespaceName ns){ + return deleteAsync(joinPath(BASE_POLICIES_PATH, ns.toString())); + } + public Optional getPolicies(NamespaceName ns) throws MetadataStoreException{ return get(joinPath(BASE_POLICIES_PATH, ns.toString())); } @@ -122,7 +127,42 @@ public CompletableFuture setPoliciesAsync(NamespaceName ns, Function deleteNamespaceAsync(NamespaceName ns) { + final String namespacePath = joinPath(NAMESPACE_BASE_PATH, ns.toString()); + CompletableFuture future = new CompletableFuture(); + deleteAsync(namespacePath).whenComplete((ignore, ex) -> { + if (ex != null && ex.getCause().getCause() instanceof 
KeeperException.NoNodeException) { + future.complete(null); + } else if (ex != null) { + future.completeExceptionally(ex); + } else { + future.complete(null); + } + }); + + return future; + } + + // clear resource of `/namespace/{tenant}` for zk-node + public CompletableFuture deleteTenantAsync(String tenant) { + final String tenantPath = joinPath(NAMESPACE_BASE_PATH, tenant); + CompletableFuture future = new CompletableFuture(); + deleteAsync(tenantPath).whenComplete((ignore, ex) -> { + if (ex != null && ex.getCause().getCause() instanceof KeeperException.NoNodeException) { + future.complete(null); + } else if (ex != null) { + future.completeExceptionally(ex); + } else { + future.complete(null); + } + }); + + return future; } public static NamespaceName namespaceFromPath(String path) { @@ -208,17 +248,72 @@ public boolean partitionedTopicExists(TopicName tn) throws MetadataStoreExceptio tn.getEncodedLocalName())); } + public CompletableFuture partitionedTopicExistsAsync(TopicName tn) { + return existsAsync(joinPath(PARTITIONED_TOPIC_PATH, tn.getNamespace(), tn.getDomain().value(), + tn.getEncodedLocalName())); + } + public CompletableFuture deletePartitionedTopicAsync(TopicName tn) { return deleteAsync(joinPath(PARTITIONED_TOPIC_PATH, tn.getNamespace(), tn.getDomain().value(), tn.getEncodedLocalName())); } - public void clearPartitionedTopicMetadata(NamespaceName namespaceName) throws MetadataStoreException { + public CompletableFuture clearPartitionedTopicMetadataAsync(NamespaceName namespaceName) { final String globalPartitionedPath = joinPath(PARTITIONED_TOPIC_PATH, namespaceName.toString()); - // check whether partitioned topics metadata node exist - if (exists(globalPartitionedPath)) { - deleteRecursive(this, globalPartitionedPath); - } + + CompletableFuture completableFuture = new CompletableFuture<>(); + + deleteRecursiveAsync(this, globalPartitionedPath) + .thenAccept(ignore -> { + log.info("Clear partitioned topic metadata [{}] success.", namespaceName); 
+ completableFuture.complete(null); + }).exceptionally(ex -> { + if (ex.getCause().getCause() instanceof KeeperException.NoNodeException) { + completableFuture.complete(null); + } else { + log.error("Clear partitioned topic metadata failed."); + completableFuture.completeExceptionally(ex.getCause()); + return null; + } + return null; + }); + + return completableFuture; } } + + + // clear resource of `/loadbalance/bundle-data/{tenant}/{namespace}/` for zk-node + public CompletableFuture deleteBundleDataAsync(NamespaceName ns) { + final String namespaceBundlePath = joinPath(BUNDLE_DATA_BASE_PATH, ns.toString()); + CompletableFuture future = new CompletableFuture(); + deleteRecursiveAsync(this, namespaceBundlePath).whenComplete((ignore, ex) -> { + if (ex instanceof MetadataStoreException.NotFoundException) { + future.complete(null); + } else if (ex != null) { + future.completeExceptionally(ex); + } else { + future.complete(null); + } + }); + + return future; + } + + // clear resource of `/loadbalance/bundle-data/{tenant}/` for zk-node + public CompletableFuture deleteBundleDataTenantAsync(String tenant) { + final String tenantBundlePath = joinPath(BUNDLE_DATA_BASE_PATH, tenant); + CompletableFuture future = new CompletableFuture(); + deleteRecursiveAsync(this, tenantBundlePath).whenComplete((ignore, ex) -> { + if (ex instanceof MetadataStoreException.NotFoundException) { + future.complete(null); + } else if (ex != null) { + future.completeExceptionally(ex); + } else { + future.complete(null); + } + }); + + return future; + } } \ No newline at end of file diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/PulsarResources.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/PulsarResources.java index a33d446ed8293..a01d57817eed6 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/PulsarResources.java +++ 
b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/PulsarResources.java @@ -50,8 +50,9 @@ public class PulsarResources { private final BookieResources bookieResources; @Getter private final TopicResources topicResources; - + @Getter private final Optional localMetadataStore; + @Getter private final Optional configurationMetadataStore; public PulsarResources(MetadataStore localMetadataStore, MetadataStore configurationMetadataStore) { diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/TenantResources.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/TenantResources.java index 95820576c50c0..36c88cf3a8b60 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/TenantResources.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/resources/TenantResources.java @@ -134,7 +134,7 @@ public CompletableFuture hasActiveNamespace(String tenant) { } if (children != null && !children.isEmpty()) { checkNs.completeExceptionally( - new IllegalStateException("Tenant has active namespace")); + new IllegalStateException("The tenant still has active namespaces")); return; } String namespace = NamespaceName.get(tenant, clusterOrNamespace).toString(); @@ -145,7 +145,7 @@ public CompletableFuture hasActiveNamespace(String tenant) { getAsync(joinPath(BASE_POLICIES_PATH, namespace)).thenApply(data -> { if (data.isPresent()) { checkNs.completeExceptionally(new IllegalStateException( - "Tenant has active namespace")); + "The tenant still has active namespaces")); } else { checkNs.complete(null); } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/validator/MultipleListenerValidator.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/validator/MultipleListenerValidator.java index 1ba46e12739e2..aa5fdd6a15242 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/validator/MultipleListenerValidator.java +++ 
b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/validator/MultipleListenerValidator.java @@ -39,10 +39,11 @@ public final class MultipleListenerValidator { /** * Validate the configuration of `advertisedListeners`, `internalListenerName`. - * 2. the listener name in `advertisedListeners` must not duplicate. - * 3. user can not assign same 'host:port' to different listener. - * 4. if `internalListenerName` is absent, the first `listener` in the `advertisedListeners` will be the `internalListenerName`. - * 5. if pulsar do not specify `brokerServicePortTls`, should only contain one entry of `pulsar://` per listener name. + * 1. `advertisedListeners` consists of a comma-separated list of endpoints. + * 2. Each endpoint consists of a listener name and an associated address (`listener:scheme://host:port`). + * 3. A listener name may be repeated to define both a non-TLS and a TLS endpoint. + * 4. Duplicate definitions are disallowed. + * 5. If `internalListenerName` is absent, set it to the first listener defined in `advertisedListeners`. * @param config the pulsar broker configure. 
* @return */ @@ -78,7 +79,7 @@ public static Map validateAndAnalysisAdvertisedListe if (entry.getValue().size() > 2) { throw new IllegalArgumentException("there are redundant configure for listener `" + entry.getKey() + "`"); } - URI pulsarAddress = null, pulsarSslAddress = null; + URI pulsarAddress = null, pulsarSslAddress = null, pulsarHttpAddress = null, pulsarHttpsAddress = null; for (final String strUri : entry.getValue()) { try { URI uri = URI.create(strUri); @@ -94,19 +95,38 @@ public static Map validateAndAnalysisAdvertisedListe } else { throw new IllegalArgumentException("there are redundant configure for listener `" + entry.getKey() + "`"); } + } else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "http")) { + if (pulsarHttpAddress == null) { + pulsarHttpAddress = uri; + } else { + throw new IllegalArgumentException("there are redundant configure for listener `" + + entry.getKey() + "`"); + } + } else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "https")) { + if (pulsarHttpsAddress == null) { + pulsarHttpsAddress = uri; + } else { + throw new IllegalArgumentException("there are redundant configure for listener `" + + entry.getKey() + "`"); + } } + String hostPort = String.format("%s:%d", uri.getHost(), uri.getPort()); - reverseMappings.computeIfAbsent(hostPort, k -> Sets.newTreeSet()); Set sets = reverseMappings.computeIfAbsent(hostPort, k -> Sets.newTreeSet()); sets.add(entry.getKey()); if (sets.size() > 1) { throw new IllegalArgumentException("must not specify `" + hostPort + "` to different listener."); } } catch (Throwable cause) { - throw new IllegalArgumentException("the value " + strUri + " in the `advertisedListeners` configure is invalid"); + throw new IllegalArgumentException("the value " + strUri + " in the `advertisedListeners` configure is invalid", cause); } } - result.put(entry.getKey(), AdvertisedListener.builder().brokerServiceUrl(pulsarAddress).brokerServiceUrlTls(pulsarSslAddress).build()); + result.put(entry.getKey(), 
AdvertisedListener.builder() + .brokerServiceUrl(pulsarAddress) + .brokerServiceUrlTls(pulsarSslAddress) + .brokerHttpUrl(pulsarHttpAddress) + .brokerHttpsUrl(pulsarHttpsAddress) + .build()); } return result; } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServletWithClassLoader.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServletWithClassLoader.java index 06a51c856c75d..7f1af734431ef 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServletWithClassLoader.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServletWithClassLoader.java @@ -19,9 +19,11 @@ package org.apache.pulsar.broker.web.plugin.servlet; import java.io.IOException; + import lombok.Data; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.ClassLoaderSwitcher; import org.apache.pulsar.common.configuration.PulsarConfiguration; import org.apache.pulsar.common.nar.NarClassLoader; import org.eclipse.jetty.servlet.ServletHolder; @@ -39,22 +41,30 @@ public class AdditionalServletWithClassLoader implements AdditionalServlet { @Override public void loadConfig(PulsarConfiguration pulsarConfiguration) { - servlet.loadConfig(pulsarConfiguration); + try (ClassLoaderSwitcher ignored = new ClassLoaderSwitcher(classLoader)) { + servlet.loadConfig(pulsarConfiguration); + } } @Override public String getBasePath() { - return servlet.getBasePath(); + try (ClassLoaderSwitcher ignored = new ClassLoaderSwitcher(classLoader)) { + return servlet.getBasePath(); + } } @Override public ServletHolder getServletHolder() { - return servlet.getServletHolder(); + try (ClassLoaderSwitcher ignored = new ClassLoaderSwitcher(classLoader)) { + return servlet.getServletHolder(); + } } @Override public void close() { - servlet.close(); + try (ClassLoaderSwitcher ignored = new 
ClassLoaderSwitcher(classLoader)) { + servlet.close(); + } try { classLoader.close(); } catch (IOException e) { diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServlets.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServlets.java index 2451cf5af04f3..080e1c7509643 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServlets.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServlets.java @@ -21,13 +21,12 @@ import com.google.common.collect.ImmutableMap; import java.io.IOException; -import java.util.Arrays; -import java.util.List; import java.util.Map; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.common.configuration.PulsarConfiguration; +import org.apache.pulsar.common.nar.NarClassLoader; /** * A collection of loaded additional servlets. @@ -71,18 +70,23 @@ public static AdditionalServlets load(PulsarConfiguration conf) throws IOExcepti if (additionalServlets == null) { additionalServlets = conf.getProperties().getProperty(PROXY_ADDITIONAL_SERVLETS); } + + String narExtractionDirectory = conf.getProperties().getProperty(NAR_EXTRACTION_DIRECTORY); + if(narExtractionDirectory == null) { + narExtractionDirectory = NarClassLoader.DEFAULT_NAR_EXTRACTION_DIR; + } + if (additionalServletDirectory == null || additionalServlets == null) { return null; } AdditionalServletDefinitions definitions = AdditionalServletUtils.searchForServlets(additionalServletDirectory - , null); + , narExtractionDirectory); ImmutableMap.Builder builder = ImmutableMap.builder(); - List additionalServletsList = Arrays.asList(additionalServlets.split(",")); - additionalServletsList.forEach(servletName -> { - + String[] additionalServletsList = additionalServlets.split(","); + for (String servletName : additionalServletsList) { AdditionalServletMetadata definition = 
definitions.servlets().get(servletName); if (null == definition) { throw new RuntimeException("No additional servlet is found for name `" + servletName @@ -91,8 +95,7 @@ public static AdditionalServlets load(PulsarConfiguration conf) throws IOExcepti AdditionalServletWithClassLoader servletWithClassLoader; try { - servletWithClassLoader = AdditionalServletUtils.load(definition, - conf.getProperties().getProperty(NAR_EXTRACTION_DIRECTORY)); + servletWithClassLoader = AdditionalServletUtils.load(definition, narExtractionDirectory); if (servletWithClassLoader != null) { builder.put(servletName, servletWithClassLoader); } @@ -101,7 +104,7 @@ public static AdditionalServlets load(PulsarConfiguration conf) throws IOExcepti log.error("Failed to load the additional servlet for name `" + servletName + "`", e); throw new RuntimeException("Failed to load the additional servlet for name `" + servletName + "`"); } - }); + } Map servlets = builder.build(); if (servlets != null && !servlets.isEmpty()) { diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/common/configuration/VipStatus.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/common/configuration/VipStatus.java index c80f0a5471c93..af1c8b34ead1f 100644 --- a/pulsar-broker-common/src/main/java/org/apache/pulsar/common/configuration/VipStatus.java +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/common/configuration/VipStatus.java @@ -27,10 +27,12 @@ import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.Context; import javax.ws.rs.core.Response.Status; +import lombok.extern.slf4j.Slf4j; /** * Web resource used by the VIP service to check to availability of the service instance. 
*/ +@Slf4j @Path("/status.html") public class VipStatus { @@ -41,7 +43,6 @@ public class VipStatus { protected ServletContext servletContext; @GET - @Context public String checkStatus() { String statusFilePath = (String) servletContext.getAttribute(ATTRIBUTE_STATUS_FILE_PATH); @SuppressWarnings("unchecked") @@ -55,6 +56,7 @@ public String checkStatus() { return "OK"; } } + log.warn("Failed to access \"status.html\". The service is not ready"); throw new WebApplicationException(Status.NOT_FOUND); } diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/jetty/package-info.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/jetty/package-info.java new file mode 100644 index 0000000000000..f01bd9198f568 --- /dev/null +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/jetty/package-info.java @@ -0,0 +1,19 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.jetty; diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/jetty/tls/JettySslContextFactory.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/jetty/tls/JettySslContextFactory.java new file mode 100644 index 0000000000000..514fa4a07250e --- /dev/null +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/jetty/tls/JettySslContextFactory.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.jetty.tls; + +import java.util.Set; +import javax.net.ssl.SSLContext; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.common.util.DefaultSslContextBuilder; +import org.apache.pulsar.common.util.SecurityUtility; +import org.apache.pulsar.common.util.SslContextAutoRefreshBuilder; +import org.apache.pulsar.common.util.keystoretls.NetSslContextBuilder; +import org.eclipse.jetty.util.ssl.SslContextFactory; + +@Slf4j +public class JettySslContextFactory { + static { + // DO NOT EDIT - Load Conscrypt provider + if (SecurityUtility.CONSCRYPT_PROVIDER != null) { + } + } + + public static SslContextFactory.Server createServerSslContextWithKeystore(String sslProviderString, + String keyStoreTypeString, + String keyStore, + String keyStorePassword, + boolean allowInsecureConnection, + String trustStoreTypeString, + String trustStore, + String trustStorePassword, + boolean requireTrustedClientCertOnConnect, + Set ciphers, + Set protocols, + long certRefreshInSec) { + NetSslContextBuilder sslCtxRefresher = new NetSslContextBuilder( + sslProviderString, + keyStoreTypeString, + keyStore, + keyStorePassword, + allowInsecureConnection, + trustStoreTypeString, + trustStore, + trustStorePassword, + requireTrustedClientCertOnConnect, + certRefreshInSec); + + return new JettySslContextFactory.Server(sslProviderString, sslCtxRefresher, + requireTrustedClientCertOnConnect, ciphers, protocols); + } + + public static SslContextFactory createServerSslContext(String sslProviderString, boolean tlsAllowInsecureConnection, + String tlsTrustCertsFilePath, + String tlsCertificateFilePath, + String tlsKeyFilePath, + boolean tlsRequireTrustedClientCertOnConnect, + Set ciphers, + Set protocols, + long certRefreshInSec) { + DefaultSslContextBuilder sslCtxRefresher = + new DefaultSslContextBuilder(tlsAllowInsecureConnection, tlsTrustCertsFilePath, tlsCertificateFilePath, + tlsKeyFilePath, tlsRequireTrustedClientCertOnConnect, certRefreshInSec, 
sslProviderString); + + return new JettySslContextFactory.Server(sslProviderString, sslCtxRefresher, + tlsRequireTrustedClientCertOnConnect, ciphers, protocols); + } + + private static class Server extends SslContextFactory.Server { + private final SslContextAutoRefreshBuilder sslCtxRefresher; + + public Server(String sslProviderString, SslContextAutoRefreshBuilder sslCtxRefresher, + boolean requireTrustedClientCertOnConnect, Set ciphers, Set protocols) { + super(); + this.sslCtxRefresher = sslCtxRefresher; + + if (ciphers != null && ciphers.size() > 0) { + this.setIncludeCipherSuites(ciphers.toArray(new String[0])); + } + + if (protocols != null && protocols.size() > 0) { + this.setIncludeProtocols(protocols.toArray(new String[0])); + } + + if (sslProviderString != null && !sslProviderString.equals("")) { + setProvider(sslProviderString); + } + + if (requireTrustedClientCertOnConnect) { + this.setNeedClientAuth(true); + this.setTrustAll(false); + } else { + this.setWantClientAuth(true); + this.setTrustAll(true); + } + } + + @Override + public SSLContext getSslContext() { + return sslCtxRefresher.get(); + } + } +} diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/jetty/tls/package-info.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/jetty/tls/package-info.java new file mode 100644 index 0000000000000..8978699ff7926 --- /dev/null +++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/jetty/tls/package-info.java @@ -0,0 +1,19 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.jetty.tls; diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authentication/AuthenticationProviderBasicTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authentication/AuthenticationProviderBasicTest.java new file mode 100644 index 0000000000000..ef7dca23e550f --- /dev/null +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authentication/AuthenticationProviderBasicTest.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.authentication; + +import static org.testng.Assert.assertEquals; +import com.google.common.io.Resources; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.Base64; +import java.util.Properties; +import javax.naming.AuthenticationException; +import lombok.Cleanup; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.common.api.AuthData; +import org.testng.annotations.Test; + +public class AuthenticationProviderBasicTest { + private final String basicAuthConf = Resources.getResource("authentication/basic/.htpasswd").getPath(); + private final String basicAuthConfBase64 = Base64.getEncoder().encodeToString(Files.readAllBytes(Paths.get(basicAuthConf))); + + public AuthenticationProviderBasicTest() throws IOException { + } + + private void testAuthenticate(AuthenticationProviderBasic provider) throws AuthenticationException { + AuthData authData = AuthData.of("superUser2:superpassword".getBytes(StandardCharsets.UTF_8)); + provider.newAuthState(authData, null, null); + } + + @Test + public void testLoadFileFromPulsarProperties() throws Exception { + @Cleanup + AuthenticationProviderBasic provider = new AuthenticationProviderBasic(); + ServiceConfiguration serviceConfiguration = new ServiceConfiguration(); + Properties properties = new Properties(); + properties.setProperty("basicAuthConf", basicAuthConf); + serviceConfiguration.setProperties(properties); + provider.initialize(serviceConfiguration); + testAuthenticate(provider); + } + + @Test + public void testLoadBase64FromPulsarProperties() throws Exception { + @Cleanup + AuthenticationProviderBasic provider = new AuthenticationProviderBasic(); + ServiceConfiguration serviceConfiguration = new ServiceConfiguration(); + Properties properties = new Properties(); + properties.setProperty("basicAuthConf", basicAuthConfBase64); + 
serviceConfiguration.setProperties(properties); + provider.initialize(serviceConfiguration); + testAuthenticate(provider); + } + + @Test + public void testLoadFileFromSystemProperties() throws Exception { + @Cleanup + AuthenticationProviderBasic provider = new AuthenticationProviderBasic(); + ServiceConfiguration serviceConfiguration = new ServiceConfiguration(); + System.setProperty("pulsar.auth.basic.conf", basicAuthConf); + provider.initialize(serviceConfiguration); + testAuthenticate(provider); + } + + @Test + public void testLoadBase64FromSystemProperties() throws Exception { + @Cleanup + AuthenticationProviderBasic provider = new AuthenticationProviderBasic(); + ServiceConfiguration serviceConfiguration = new ServiceConfiguration(); + System.setProperty("pulsar.auth.basic.conf", basicAuthConfBase64); + provider.initialize(serviceConfiguration); + testAuthenticate(provider); + } + + @Test + public void testReadData() throws Exception { + byte[] data = Files.readAllBytes(Paths.get(basicAuthConf)); + String base64Data = Base64.getEncoder().encodeToString(data); + + // base64 format + assertEquals(AuthenticationProviderBasic.readData("data:;base64," + base64Data), data); + assertEquals(AuthenticationProviderBasic.readData(base64Data), data); + + // file format + assertEquals(AuthenticationProviderBasic.readData("file://" + basicAuthConf), data); + assertEquals(AuthenticationProviderBasic.readData(basicAuthConf), data); + } +} diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authentication/AuthenticationProviderTokenTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authentication/AuthenticationProviderTokenTest.java index 6582a6ab69d89..7d6404c3a32a6 100644 --- a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authentication/AuthenticationProviderTokenTest.java +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authentication/AuthenticationProviderTokenTest.java @@ -18,13 +18,13 @@ */ package 
org.apache.pulsar.broker.authentication; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; -import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - import com.google.common.collect.Lists; import io.jsonwebtoken.Claims; import io.jsonwebtoken.Jwt; @@ -33,26 +33,24 @@ import io.jsonwebtoken.SignatureAlgorithm; import io.jsonwebtoken.io.Decoders; import io.jsonwebtoken.security.Keys; -import java.security.Key; -import java.util.Arrays; -import java.util.List; -import lombok.Cleanup; - import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; +import java.security.Key; import java.security.KeyPair; import java.security.PrivateKey; import java.sql.Date; +import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Optional; import java.util.Properties; import java.util.concurrent.TimeUnit; - import javax.crypto.SecretKey; import javax.naming.AuthenticationException; - +import javax.servlet.http.HttpServletRequest; +import lombok.Cleanup; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; import org.apache.pulsar.common.api.AuthData; @@ -240,7 +238,7 @@ public void testAuthSecretKeyFromFile() throws Exception { AuthenticationProviderToken provider = new AuthenticationProviderToken(); Properties properties = new Properties(); - properties.setProperty(AuthenticationProviderToken.CONF_TOKEN_SECRET_KEY, "file://" + secretKeyFile.toString()); + properties.setProperty(AuthenticationProviderToken.CONF_TOKEN_SECRET_KEY, "file:///" + secretKeyFile.toString().replace('\\', '/')); ServiceConfiguration conf = new ServiceConfiguration(); conf.setProperties(properties); @@ -849,4 +847,55 @@ 
public String getCommandData() { assertEquals(subject, SUBJECT); provider.close(); } + + @Test + public void testTokenFromHttpParams() throws Exception { + SecretKey secretKey = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + + @Cleanup + AuthenticationProviderToken provider = new AuthenticationProviderToken(); + + Properties properties = new Properties(); + properties.setProperty(AuthenticationProviderToken.CONF_TOKEN_SECRET_KEY, + AuthTokenUtils.encodeKeyBase64(secretKey)); + + ServiceConfiguration conf = new ServiceConfiguration(); + conf.setProperties(properties); + provider.initialize(conf); + + String token = AuthTokenUtils.createToken(secretKey, SUBJECT, Optional.empty()); + HttpServletRequest servletRequest = mock(HttpServletRequest.class); + doReturn(token).when(servletRequest).getParameter("token"); + doReturn(null).when(servletRequest).getHeader("Authorization"); + doReturn("127.0.0.1").when(servletRequest).getRemoteAddr(); + doReturn(0).when(servletRequest).getRemotePort(); + + AuthenticationDataHttps authenticationDataHttps = new AuthenticationDataHttps(servletRequest); + provider.authenticate(authenticationDataHttps); + } + + @Test + public void testTokenFromHttpHeaders() throws Exception { + SecretKey secretKey = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + + @Cleanup + AuthenticationProviderToken provider = new AuthenticationProviderToken(); + + Properties properties = new Properties(); + properties.setProperty(AuthenticationProviderToken.CONF_TOKEN_SECRET_KEY, + AuthTokenUtils.encodeKeyBase64(secretKey)); + + ServiceConfiguration conf = new ServiceConfiguration(); + conf.setProperties(properties); + provider.initialize(conf); + + String token = AuthTokenUtils.createToken(secretKey, SUBJECT, Optional.empty()); + HttpServletRequest servletRequest = mock(HttpServletRequest.class); + doReturn("Bearer " + token).when(servletRequest).getHeader("Authorization"); + doReturn("127.0.0.1").when(servletRequest).getRemoteAddr(); + 
doReturn(0).when(servletRequest).getRemotePort(); + + AuthenticationDataHttps authenticationDataHttps = new AuthenticationDataHttps(servletRequest); + provider.authenticate(authenticationDataHttps); + } } diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProviderTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProviderTest.java index fdedf864c488c..078e2aad07aca 100644 --- a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProviderTest.java +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/authorization/MultiRolesTokenAuthorizationProviderTest.java @@ -18,10 +18,14 @@ */ package org.apache.pulsar.broker.authorization; +import static org.mockito.Mockito.mock; import io.jsonwebtoken.Jwts; import io.jsonwebtoken.SignatureAlgorithm; +import java.util.Properties; +import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; +import org.apache.pulsar.broker.resources.PulsarResources; import org.junit.Assert; import org.testng.annotations.Test; @@ -56,7 +60,9 @@ public String getHttpHeader(String name) { }; Assert.assertTrue(provider.authorize(ads, role -> { - if (role.equals(userB)) return CompletableFuture.completedFuture(true); // only userB has permission + if (role.equals(userB)) { + return CompletableFuture.completedFuture(true); // only userB has permission + } return CompletableFuture.completedFuture(false); }).get()); @@ -65,7 +71,130 @@ public String getHttpHeader(String name) { }).get()); Assert.assertFalse(provider.authorize(ads, role -> { - return CompletableFuture.completedFuture(false); // only users has no permission + return CompletableFuture.completedFuture(false); // all users has no permission + }).get()); + } + + @Test + 
public void testMultiRolesAuthzWithEmptyRoles() throws Exception { + SecretKey secretKey = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + String token = Jwts.builder().claim("sub", new String[]{}).signWith(secretKey).compact(); + + MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + + AuthenticationDataSource ads = new AuthenticationDataSource() { + @Override + public boolean hasDataFromHttp() { + return true; + } + + @Override + public String getHttpHeader(String name) { + if (name.equals("Authorization")) { + return "Bearer " + token; + } else { + throw new IllegalArgumentException("Wrong HTTP header"); + } + } + }; + + Assert.assertFalse(provider.authorize(ads, role -> CompletableFuture.completedFuture(false)).get()); + } + + @Test + public void testMultiRolesAuthzWithSingleRole() throws Exception { + SecretKey secretKey = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + String testRole = "test-role"; + String token = Jwts.builder().claim("sub", testRole).signWith(secretKey).compact(); + + MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + + AuthenticationDataSource ads = new AuthenticationDataSource() { + @Override + public boolean hasDataFromHttp() { + return true; + } + + @Override + public String getHttpHeader(String name) { + if (name.equals("Authorization")) { + return "Bearer " + token; + } else { + throw new IllegalArgumentException("Wrong HTTP header"); + } + } + }; + + Assert.assertTrue(provider.authorize(ads, role -> { + if (role.equals(testRole)) { + return CompletableFuture.completedFuture(true); + } + return CompletableFuture.completedFuture(false); + }).get()); + } + + @Test + public void testMultiRolesNotFailNonJWT() throws Exception { + String token = "a-non-jwt-token"; + + MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + + AuthenticationDataSource ads = new AuthenticationDataSource() { + 
@Override + public boolean hasDataFromHttp() { + return true; + } + + @Override + public String getHttpHeader(String name) { + if (name.equals("Authorization")) { + return "Bearer " + token; + } else { + throw new IllegalArgumentException("Wrong HTTP header"); + } + } + }; + + Assert.assertFalse(provider.authorize(ads, role -> CompletableFuture.completedFuture(false)).get()); + } + + @Test + public void testMultiRolesAuthzWithCustomRolesClaims() throws Exception { + SecretKey secretKey = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + String testRole = "test-role"; + String customRolesClaims = "role"; + String token = Jwts.builder().claim(customRolesClaims, new String[]{testRole}).signWith(secretKey).compact(); + + Properties properties = new Properties(); + properties.setProperty("tokenSettingPrefix", "prefix_"); + properties.setProperty("prefix_tokenAuthClaim", customRolesClaims); + ServiceConfiguration conf = new ServiceConfiguration(); + conf.setProperties(properties); + + MultiRolesTokenAuthorizationProvider provider = new MultiRolesTokenAuthorizationProvider(); + provider.initialize(conf, mock(PulsarResources.class)); + + AuthenticationDataSource ads = new AuthenticationDataSource() { + @Override + public boolean hasDataFromHttp() { + return true; + } + + @Override + public String getHttpHeader(String name) { + if (name.equals("Authorization")) { + return "Bearer " + token; + } else { + throw new IllegalArgumentException("Wrong HTTP header"); + } + } + }; + + Assert.assertTrue(provider.authorize(ads, role -> { + if (role.equals(testRole)) { + return CompletableFuture.completedFuture(true); + } + return CompletableFuture.completedFuture(false); }).get()); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/exceptions/TransactionPendingAckStoreProviderException.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/resources/NamespaceResourcesTest.java similarity index 58% rename from 
pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/exceptions/TransactionPendingAckStoreProviderException.java rename to pulsar-broker-common/src/test/java/org/apache/pulsar/broker/resources/NamespaceResourcesTest.java index 0f41e2cd3c455..6081601b6052e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/exceptions/TransactionPendingAckStoreProviderException.java +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/resources/NamespaceResourcesTest.java @@ -16,17 +16,19 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.pulsar.broker.transaction.pendingack.exceptions; -import org.apache.pulsar.broker.transaction.buffer.exceptions.TransactionBufferException; +package org.apache.pulsar.broker.resources; -/** - * Transaction pending ack store provider exception. - */ -public class TransactionPendingAckStoreProviderException extends TransactionBufferException { +import org.junit.Assert; +import org.testng.annotations.Test; - public TransactionPendingAckStoreProviderException(String message) { - super(message); - } +public class NamespaceResourcesTest { + @Test + public void test_pathIsFromNamespace() { + Assert.assertFalse(NamespaceResources.pathIsFromNamespace("/admin/clusters")); + Assert.assertFalse(NamespaceResources.pathIsFromNamespace("/admin/policies")); + Assert.assertFalse(NamespaceResources.pathIsFromNamespace("/admin/policies/my-tenant")); + Assert.assertTrue(NamespaceResources.pathIsFromNamespace("/admin/policies/my-tenant/my-ns")); + } } \ No newline at end of file diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/validator/MultipleListenerValidatorTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/validator/MultipleListenerValidatorTest.java index 4cd07438d85d2..d7fd802c17cf7 100644 --- 
a/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/validator/MultipleListenerValidatorTest.java +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/broker/validator/MultipleListenerValidatorTest.java @@ -32,15 +32,6 @@ */ public class MultipleListenerValidatorTest { - @Test(expectedExceptions = IllegalArgumentException.class) - public void testAppearTogether() { - ServiceConfiguration config = new ServiceConfiguration(); - config.setAdvertisedAddress("127.0.0.1"); - config.setAdvertisedListeners("internal:pulsar://192.168.1.11:6660,internal:pulsar+ssl://192.168.1.11:6651"); - config.setInternalListenerName("internal"); - MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config); - } - @Test public void testGetAppliedAdvertised() throws Exception { ServiceConfiguration config = new ServiceConfiguration(); @@ -67,6 +58,20 @@ public void testGetAppliedAdvertised() throws Exception { ServiceConfigurationUtils.getDefaultOrConfiguredAddress(null)); } + @Test + public void testListenerDefaulting() { + ServiceConfiguration config = new ServiceConfiguration(); + config.setAdvertisedListeners(" internal:pulsar://127.0.0.1:6660, internal:pulsar+ssl://127.0.0.1:6651"); + MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config); + assertEquals("internal", config.getInternalListenerName()); + } + + @Test(expectedExceptions = IllegalArgumentException.class) + public void testMalformedListener() { + ServiceConfiguration config = new ServiceConfiguration(); + config.setAdvertisedListeners(":pulsar://127.0.0.1:6660"); + MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config); + } @Test(expectedExceptions = IllegalArgumentException.class) public void testListenerDuplicate_1() { @@ -86,18 +91,17 @@ public void testListenerDuplicate_2() { } @Test(expectedExceptions = IllegalArgumentException.class) - public void testDifferentListenerWithSameHostPort() { + public void testListenerDuplicate_3() { ServiceConfiguration config = 
new ServiceConfiguration(); - config.setAdvertisedListeners(" internal:pulsar://127.0.0.1:6660," + " external:pulsar://127.0.0.1:6660"); + config.setAdvertisedListeners(" internal:pulsar+ssl://127.0.0.1:6661," + " internal:pulsar+ssl://192.168.1.11:6661"); config.setInternalListenerName("internal"); MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config); } - @Test - public void testListenerWithTLSPort() { + @Test(expectedExceptions = IllegalArgumentException.class) + public void testDifferentListenerWithSameHostPort() { ServiceConfiguration config = new ServiceConfiguration(); - config.setBrokerServicePortTls(Optional.of(6651)); - config.setAdvertisedListeners(" internal:pulsar://127.0.0.1:6660, internal:pulsar+ssl://127.0.0.1:6651"); + config.setAdvertisedListeners(" internal:pulsar://127.0.0.1:6660," + " external:pulsar://127.0.0.1:6660"); config.setInternalListenerName("internal"); MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config); } @@ -105,7 +109,6 @@ public void testListenerWithTLSPort() { @Test(expectedExceptions = IllegalArgumentException.class) public void testWithoutListenerNameInAdvertisedListeners() { ServiceConfiguration config = new ServiceConfiguration(); - config.setBrokerServicePortTls(Optional.of(6651)); config.setAdvertisedListeners(" internal:pulsar://127.0.0.1:6660, internal:pulsar+ssl://127.0.0.1:6651"); config.setInternalListenerName("external"); MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config); diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/jetty/tls/JettySslContextFactoryTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/jetty/tls/JettySslContextFactoryTest.java new file mode 100644 index 0000000000000..c1816674880a7 --- /dev/null +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/jetty/tls/JettySslContextFactoryTest.java @@ -0,0 +1,174 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.pulsar.jetty.tls; + +import com.google.common.io.Resources; +import java.io.IOException; +import java.security.GeneralSecurityException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLHandshakeException; +import lombok.extern.slf4j.Slf4j; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.config.RegistryBuilder; +import org.apache.http.conn.socket.ConnectionSocketFactory; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; +import org.apache.pulsar.common.util.SecurityUtility; +import org.eclipse.jetty.server.Server; +import org.eclipse.jetty.server.ServerConnector; +import org.eclipse.jetty.util.ssl.SslContextFactory; +import org.testng.annotations.Test; + +@Slf4j +public class JettySslContextFactoryTest { + + @Test + public void testJettyTlsServerTls() throws Exception { + Server server = new Server(); + List connectors = new 
ArrayList<>(); + SslContextFactory factory = JettySslContextFactory.createServerSslContext( + null, + false, + Resources.getResource("ssl/my-ca/ca.pem").getPath(), + Resources.getResource("ssl/my-ca/server-ca.pem").getPath(), + Resources.getResource("ssl/my-ca/server-key.pem").getPath(), + true, + null, + null, + 600); + + ServerConnector connector = new ServerConnector(server, factory); + connector.setPort(0); + connectors.add(connector); + server.setConnectors(connectors.toArray(new ServerConnector[0])); + server.start(); + // client connect + HttpClientBuilder httpClientBuilder = HttpClients.custom(); + RegistryBuilder registryBuilder = RegistryBuilder.create(); + registryBuilder.register("https", + new SSLConnectionSocketFactory(getClientSslContext(), new NoopHostnameVerifier())); + PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(registryBuilder.build()); + httpClientBuilder.setConnectionManager(cm); + CloseableHttpClient httpClient = httpClientBuilder.build(); + HttpGet httpGet = new HttpGet("https://localhost:" + connector.getLocalPort()); + httpClient.execute(httpGet); + httpClient.close(); + server.stop(); + } + + @Test(expectedExceptions = SSLHandshakeException.class) + public void testJettyTlsServerInvalidTlsProtocol() throws Exception { + Server server = new Server(); + List connectors = new ArrayList<>(); + SslContextFactory factory = JettySslContextFactory.createServerSslContext( + null, + false, + Resources.getResource("ssl/my-ca/ca.pem").getPath(), + Resources.getResource("ssl/my-ca/server-ca.pem").getPath(), + Resources.getResource("ssl/my-ca/server-key.pem").getPath(), + true, + null, + new HashSet() { + { + this.add("TLSv1.3"); + } + }, + 600); + factory.setHostnameVerifier((s, sslSession) -> true); + ServerConnector connector = new ServerConnector(server, factory); + connector.setPort(0); + connectors.add(connector); + server.setConnectors(connectors.toArray(new ServerConnector[0])); + server.start(); + // client 
connect + HttpClientBuilder httpClientBuilder = HttpClients.custom(); + RegistryBuilder registryBuilder = RegistryBuilder.create(); + registryBuilder.register("https", new SSLConnectionSocketFactory(getClientSslContext(), + new String[]{"TLSv1.2"}, null, new NoopHostnameVerifier())); + PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(registryBuilder.build()); + httpClientBuilder.setConnectionManager(cm); + CloseableHttpClient httpClient = httpClientBuilder.build(); + HttpGet httpGet = new HttpGet("https://localhost:" + connector.getLocalPort()); + httpClient.execute(httpGet); + httpClient.close(); + server.stop(); + } + + @Test(expectedExceptions = SSLHandshakeException.class) + public void testJettyTlsServerInvalidCipher() throws Exception { + Server server = new Server(); + List connectors = new ArrayList<>(); + SslContextFactory factory = JettySslContextFactory.createServerSslContext( + null, + false, + Resources.getResource("ssl/my-ca/ca.pem").getPath(), + Resources.getResource("ssl/my-ca/server-ca.pem").getPath(), + Resources.getResource("ssl/my-ca/server-key.pem").getPath(), + true, + new HashSet() { + { + this.add("TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"); + } + }, + new HashSet() { + { + this.add("TLSv1.2"); + } + }, + 600); + + factory.setHostnameVerifier((s, sslSession) -> true); + ServerConnector connector = new ServerConnector(server, factory); + connector.setPort(0); + connectors.add(connector); + server.setConnectors(connectors.toArray(new ServerConnector[0])); + server.start(); + // client connect + HttpClientBuilder httpClientBuilder = HttpClients.custom(); + RegistryBuilder registryBuilder = RegistryBuilder.create(); + registryBuilder.register("https", new SSLConnectionSocketFactory(getClientSslContext(), + new String[]{"TLSv1.2"}, new String[]{"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"}, + new NoopHostnameVerifier())); + PoolingHttpClientConnectionManager cm = new 
PoolingHttpClientConnectionManager(registryBuilder.build()); + httpClientBuilder.setConnectionManager(cm); + CloseableHttpClient httpClient = httpClientBuilder.build(); + HttpGet httpGet = new HttpGet("https://localhost:" + connector.getLocalPort()); + httpClient.execute(httpGet); + httpClient.close(); + server.stop(); + } + + private static SSLContext getClientSslContext() throws GeneralSecurityException, IOException { + return SecurityUtility.createSslContext( + false, + Resources.getResource("ssl/my-ca/ca.pem").getPath(), + Resources.getResource("ssl/my-ca/client-ca.pem").getPath(), + Resources.getResource("ssl/my-ca/client-key.pem").getPath(), + null + ); + } +} diff --git a/pulsar-broker-common/src/test/java/org/apache/pulsar/jetty/tls/JettySslContextFactoryWithKeyStoreTest.java b/pulsar-broker-common/src/test/java/org/apache/pulsar/jetty/tls/JettySslContextFactoryWithKeyStoreTest.java new file mode 100644 index 0000000000000..292bd123fdfaf --- /dev/null +++ b/pulsar-broker-common/src/test/java/org/apache/pulsar/jetty/tls/JettySslContextFactoryWithKeyStoreTest.java @@ -0,0 +1,193 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.jetty.tls; + +import com.google.common.io.Resources; +import java.io.FileInputStream; +import java.security.KeyStore; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.TrustManagerFactory; +import lombok.extern.slf4j.Slf4j; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.config.RegistryBuilder; +import org.apache.http.conn.socket.ConnectionSocketFactory; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.core.config.Configurator; +import org.eclipse.jetty.server.Server; +import org.eclipse.jetty.server.ServerConnector; +import org.eclipse.jetty.util.ssl.SslContextFactory; +import org.testng.annotations.Test; + +@Slf4j +public class JettySslContextFactoryWithKeyStoreTest { + + @Test + public void testJettyTlsServerTls() throws Exception { + Server server = new Server(); + List connectors = new ArrayList<>(); + SslContextFactory.Server factory = JettySslContextFactory.createServerSslContextWithKeystore(null, + "JKS", Resources.getResource("ssl/jetty_server_key.jks").getPath(), + "jetty_server_pwd", false, "JKS", + Resources.getResource("ssl/jetty_server_trust.jks").getPath(), + "jetty_server_pwd", true, null, + null, 600); + factory.setHostnameVerifier((s, sslSession) -> true); + ServerConnector connector = new ServerConnector(server, factory); + connector.setPort(0); + connectors.add(connector); + 
server.setConnectors(connectors.toArray(new ServerConnector[0])); + server.start(); + // client connect + HttpClientBuilder httpClientBuilder = HttpClients.custom(); + RegistryBuilder registryBuilder = RegistryBuilder.create(); + registryBuilder.register("https", + new SSLConnectionSocketFactory(getClientSslContext(), new NoopHostnameVerifier())); + PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(registryBuilder.build()); + httpClientBuilder.setConnectionManager(cm); + CloseableHttpClient httpClient = httpClientBuilder.build(); + HttpGet httpGet = new HttpGet("https://localhost:" + connector.getLocalPort()); + httpClient.execute(httpGet); + httpClient.close(); + server.stop(); + } + + @Test(expectedExceptions = SSLHandshakeException.class) + public void testJettyTlsServerInvalidTlsProtocol() throws Exception { + Configurator.setRootLevel(Level.INFO); + Server server = new Server(); + List connectors = new ArrayList<>(); + SslContextFactory.Server factory = JettySslContextFactory.createServerSslContextWithKeystore(null, + "JKS", Resources.getResource("ssl/jetty_server_key.jks").getPath(), + "jetty_server_pwd", false, "JKS", + Resources.getResource("ssl/jetty_server_trust.jks").getPath(), + "jetty_server_pwd", true, null, + new HashSet() { + { + this.add("TLSv1.3"); + } + }, 600); + factory.setHostnameVerifier((s, sslSession) -> true); + ServerConnector connector = new ServerConnector(server, factory); + connector.setPort(0); + connectors.add(connector); + server.setConnectors(connectors.toArray(new ServerConnector[0])); + server.start(); + // client connect + HttpClientBuilder httpClientBuilder = HttpClients.custom(); + RegistryBuilder registryBuilder = RegistryBuilder.create(); + registryBuilder.register("https", new SSLConnectionSocketFactory(getClientSslContext(), + new String[]{"TLSv1.2"}, null, new NoopHostnameVerifier())); + PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(registryBuilder.build()); + 
httpClientBuilder.setConnectionManager(cm); + CloseableHttpClient httpClient = httpClientBuilder.build(); + HttpGet httpGet = new HttpGet("https://localhost:" + connector.getLocalPort()); + httpClient.execute(httpGet); + httpClient.close(); + server.stop(); + } + + @Test(expectedExceptions = SSLHandshakeException.class) + public void testJettyTlsServerInvalidCipher() throws Exception { + Server server = new Server(); + List connectors = new ArrayList<>(); + SslContextFactory.Server factory = JettySslContextFactory.createServerSslContextWithKeystore(null, + "JKS", Resources.getResource("ssl/jetty_server_key.jks").getPath(), + "jetty_server_pwd", false, "JKS", + Resources.getResource("ssl/jetty_server_trust.jks").getPath(), + "jetty_server_pwd", true, new HashSet() { + { + this.add("TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"); + } + }, + new HashSet() { + { + this.add("TLSv1.2"); + } + }, 600); + factory.setHostnameVerifier((s, sslSession) -> true); + ServerConnector connector = new ServerConnector(server, factory); + connector.setPort(0); + connectors.add(connector); + server.setConnectors(connectors.toArray(new ServerConnector[0])); + server.start(); + // client connect + HttpClientBuilder httpClientBuilder = HttpClients.custom(); + RegistryBuilder registryBuilder = RegistryBuilder.create(); + registryBuilder.register("https", new SSLConnectionSocketFactory(getClientSslContext(), + new String[]{"TLSv1.2"}, new String[]{"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"}, + new NoopHostnameVerifier())); + PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(registryBuilder.build()); + httpClientBuilder.setConnectionManager(cm); + CloseableHttpClient httpClient = httpClientBuilder.build(); + HttpGet httpGet = new HttpGet("https://localhost:" + connector.getLocalPort()); + httpClient.execute(httpGet); + httpClient.close(); + server.stop(); + } + + private static SSLContext getClientSslContext() { + return 
getSslContext(Resources.getResource("ssl/jetty_client_key.jks").getPath(), + "jetty_client_pwd", + Resources.getResource("ssl/jetty_client_trust.jks").getPath(), + "jetty_client_pwd"); + } + + private static SSLContext getSslContext(String keyStorePath, String keyStorePassword, + String trustStorePath, String trustStorePassword) { + try { + SSLContext sslContext = SSLContext.getInstance("TLS"); + // key store + KeyManagerFactory keyManagerFactory = + KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + KeyStore keyStore = KeyStore.getInstance("JKS"); + try (FileInputStream inputStream = new FileInputStream(keyStorePath)) { + keyStore.load(inputStream, keyStorePassword.toCharArray()); + } + keyManagerFactory.init(keyStore, keyStorePassword.toCharArray()); + KeyManager[] keyManagers = keyManagerFactory.getKeyManagers(); + // trust store + TrustManagerFactory trustManagerFactory = + TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + KeyStore trustStore = KeyStore.getInstance("JKS"); + try (FileInputStream inputStream = new FileInputStream(trustStorePath)) { + trustStore.load(inputStream, trustStorePassword.toCharArray()); + } + trustManagerFactory.init(trustStore); + sslContext.init(keyManagers, trustManagerFactory.getTrustManagers(), new SecureRandom()); + return sslContext; + } catch (Exception e) { + log.error("load ssl context error ", e); + return null; + } + } + +} \ No newline at end of file diff --git a/pulsar-broker-common/src/test/resources/authentication/basic/.htpasswd b/pulsar-broker-common/src/test/resources/authentication/basic/.htpasswd new file mode 100644 index 0000000000000..b1a099a5f0ecb --- /dev/null +++ b/pulsar-broker-common/src/test/resources/authentication/basic/.htpasswd @@ -0,0 +1,2 @@ +superUser:mQQQIsyvvKRtU +superUser2:$apr1$foobarmq$kuSZlLgOITksCkRgl57ie/ diff --git a/pulsar-broker-common/src/test/resources/ssl/jetty_client_key.jks 
b/pulsar-broker-common/src/test/resources/ssl/jetty_client_key.jks new file mode 100644 index 0000000000000..2b8ea64347ddc Binary files /dev/null and b/pulsar-broker-common/src/test/resources/ssl/jetty_client_key.jks differ diff --git a/pulsar-broker-common/src/test/resources/ssl/jetty_client_trust.jks b/pulsar-broker-common/src/test/resources/ssl/jetty_client_trust.jks new file mode 100644 index 0000000000000..166a2e00fb371 Binary files /dev/null and b/pulsar-broker-common/src/test/resources/ssl/jetty_client_trust.jks differ diff --git a/pulsar-broker-common/src/test/resources/ssl/jetty_server_key.jks b/pulsar-broker-common/src/test/resources/ssl/jetty_server_key.jks new file mode 100644 index 0000000000000..b6189b75c8ad0 Binary files /dev/null and b/pulsar-broker-common/src/test/resources/ssl/jetty_server_key.jks differ diff --git a/pulsar-broker-common/src/test/resources/ssl/jetty_server_trust.jks b/pulsar-broker-common/src/test/resources/ssl/jetty_server_trust.jks new file mode 100644 index 0000000000000..b09cc030a71c3 Binary files /dev/null and b/pulsar-broker-common/src/test/resources/ssl/jetty_server_trust.jks differ diff --git a/pulsar-broker-common/src/test/resources/ssl/my-ca/ca.pem b/pulsar-broker-common/src/test/resources/ssl/my-ca/ca.pem new file mode 100644 index 0000000000000..3d5a80e234784 --- /dev/null +++ b/pulsar-broker-common/src/test/resources/ssl/my-ca/ca.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC9DCCAdygAwIBAgIUNbNkV2+K2Hf4Q1V5gdAENZQiLokwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAxMGUHVsc2FyMCAXDTIyMDExNDA0MjgwMFoYDzIxMjIwMTE2 +MDQyODAwWjARMQ8wDQYDVQQDEwZQdWxzYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDBR2K5EKVziLqdsz78efEW4lOwKiJ32e97uxn1Z6oKgkgImpVP +Z9aoJB4EwSnDg+6FV2YULdWPm7C6W33tDmWRaU/Hlo/cOejnK8UmiMu/EyDpE2Wj +n0RimGmwOkBi2IWIcIzWMmPDZ9kZc65OUeEmwZedKRy62PQyfCeNU4OOHQn3PXjI +NbXJZD5TvBmn4SJn2RP9EgmIPaBAh/Mng045ZeHHLhwMKC8EOyHc2aB7AL6brymR +xzsiYWdcJn4mqqMvT82mVvhkgAMOcR4CXYF8eYnsG6ZbDHb13CawcvLVREJZk7AB 
+XZi9Rd5xczxHILM8rdkIZfunaG1X5hbih5wJAgMBAAGjQjBAMA4GA1UdDwEB/wQE +AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTCC1lYG+62cUPjNk9q4jCm +Ps65njANBgkqhkiG9w0BAQsFAAOCAQEAKV2Lpu5cH5EsG53EWsYxEKvuQZ0LTxCE +wCDf/NxJaQbzfv0tsbZatMge0vcZ/5r8tZZoOC+pGTwk6MaRbEFH8PmvlH1LIQvu +Y34/YQZOy8wBTWwaIfFMnYWc0iAFoFt2Lzuq+GOI+svTFp729Ae8r7UxY/f9Lioc +ttdGr7vA6PpcIMoEIPjVp+m41uL9IDfX8eOxg4gVlwtqpbHdTzMrOz0YY+3qH/WK +6Qffw4pwitzAEj2zCn2lvGC5cbpd13SAaqtB3xL/Aet0SS2r3g9qDo1RruQhXUng +06U/Hqtn5K1fNQv3pivi3Jg5z1DfJWHkH37luAoIlOZHRmPK6rhp/g== +-----END CERTIFICATE----- diff --git a/pulsar-broker-common/src/test/resources/ssl/my-ca/client-ca.pem b/pulsar-broker-common/src/test/resources/ssl/my-ca/client-ca.pem new file mode 100644 index 0000000000000..adcae3393ade1 --- /dev/null +++ b/pulsar-broker-common/src/test/resources/ssl/my-ca/client-ca.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDHDCCAgSgAwIBAgIUJJpmKX3DnbUwJ7tUhCt8MTiwz0owDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAxMGUHVsc2FyMCAXDTIyMDExNDA0MjgwMFoYDzIxMjExMjIx +MDQyODAwWjARMQ8wDQYDVQQDEwZQdWxzYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDZN+CNZ1i1WaXulbwSASOfXErWXhGV9DHqavPp3DohgQdundfS +648T/X80uWQlyxu4L4j0oc97jtzc1AyZFXj5nocVsveEO9aDjnYCc5NdBNJLQHgl +IO59fEpTd55NO24g9a8/sxgn0ADCenMlngk1Ou+2QJBONw7W12/WUSUg6ICe+b+x +qPzgApue16oGw9HxhPwa3oEvVZrEnFIWLjsSWtezhgFHMCH9/ngk0KlRyes/EZCz +ZgkO5mgii2fmNDg+yuWUfw7Q0x6BJskGIrxisJiJBRR1+DIvJqgqxJsNmeeEQrZK +YHBukj5RWDFOpOHgqFbPsv45sVKoLrGFrMnNAgMBAAGjajBoMA4GA1UdDwEB/wQE +AwIFoDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQW +BBSwkx93xjYP4I+dcFF3xS9NLesmFjAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJ +KoZIhvcNAQELBQADggEBAAK3ZF63w46pT76QIOeSM3ocUm6izvW/IrxLUESfgRC4 +gg0/5VfPiHHUe6orn15KuPXHe7xCUFqc2oFn5aIU1B/6iOPeNItvMJidU0a3UAiw +hFK9MSFgESNBiEnu1dE5tPcIIxTyCFQ/8loeY3dsdcNVoguH/2J9v/XcMMga46A1 +wudaaa1nb+ZYnXkRuyObKVJQN7EqC+4edinMOTPBbF9wtRMAMBRHXXENXb9zFthi +Dbdn4YvadYsNHxh5ar+hQn/HSPMuCUPY/uUqxtBagb6aS0YnSoUscSLs1Jizg5NX +d+QV8X/5E6W4xWnptUZwVxOemkdnr6A8MH1eQKKFZTM= +-----END 
CERTIFICATE----- diff --git a/pulsar-broker-common/src/test/resources/ssl/my-ca/client-key.pem b/pulsar-broker-common/src/test/resources/ssl/my-ca/client-key.pem new file mode 100644 index 0000000000000..5b08b151c8094 --- /dev/null +++ b/pulsar-broker-common/src/test/resources/ssl/my-ca/client-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDZN+CNZ1i1WaXu +lbwSASOfXErWXhGV9DHqavPp3DohgQdundfS648T/X80uWQlyxu4L4j0oc97jtzc +1AyZFXj5nocVsveEO9aDjnYCc5NdBNJLQHglIO59fEpTd55NO24g9a8/sxgn0ADC +enMlngk1Ou+2QJBONw7W12/WUSUg6ICe+b+xqPzgApue16oGw9HxhPwa3oEvVZrE +nFIWLjsSWtezhgFHMCH9/ngk0KlRyes/EZCzZgkO5mgii2fmNDg+yuWUfw7Q0x6B +JskGIrxisJiJBRR1+DIvJqgqxJsNmeeEQrZKYHBukj5RWDFOpOHgqFbPsv45sVKo +LrGFrMnNAgMBAAECggEATeVZ45uiFja16J9NuG8sJSPluoY1bD8L/3KnUcAmIImy +7powIXVT8+k+StwI6/ywThbN2FyGmVqcHZz1f5hRr8KH0uJBHOyQetEFxM9Jk1v9 +Rfsymq36mImP5erJnAyp66vvUrqY+P4Ap71duam4x5wBBqyUk1fvPGA5vPOQiwHs +TN9JHizGobY25fpigWKIMamyE7HWXEUzVdOo83ZiNx53ths+WcF/kqto2v5LtyfJ +HgoPocfZI8tRz9tfgc8zOkvyjsvgdd6rLhd0r2oExnyQBJdktGFpQZMGambU328u +NqcdJscjP/HWAHRzuSdOvCMOEn8E5GIjcWEnQqOmSQKBgQDcpb655/UdcVxrv2Ou +8juucDJMpf6i/UcmlXVXx+3zGSuQZcCC2fupe3JcxPdK7bo65YlC3OoRihggh2sS +cnFMNHMfyoE3G/doXIr3QyL9UAQt4yb+7Nz7jRXYcg4Ytv+FVS6BSzIDEK17v+es +GuWDM3JwtigtzYS4tRh7lgmuBwKBgQD8BXp7yIyVv657B8OJJSoeGataziFPhZux +WKoS3gq24169ZWXwLc+nwrdgvBNrRaHuX+cYh93RF9+2WZrRcRL41XqN938adasY +zPsfOJa9IOgUzQtGUMSe1/WqvHfcvqZCqYq4u/LSdf+I67woP4tCqqn4E928aIZb +6PjLH+dUiwKBgH1ntn7y1t1lEKIspPtJsaHzIqNttMvuKAJF7+t0Nkl0hM4NBt1Y +BzDMeLNBP0vW0YGn89uMs3xEgHH8hV52rO4i4UuwTMCFpJgsAM+H2NsgHz/1WrSI +6xANn9zk9h4V5CRjxYq2sjYLxI4RBBtNLiTjmKd24F8n78cLJl8XZ2kBAoGAGoHF +ATH1v2ZaxqvpYApdpK7UfAeEL2YBGyUVNkjOXbAKbec1Uo6u8ZkkSnNdo4G+Z2EE +4Gqh5PUa3YYNJ4w6D5v8eOQYJUNNDJ26p+z+xcOpRU7PqcSi+YYDW8LY5InU2NwW +MBnsj0BD8TXCI4WTcx6aI/KK9t8TiqU1Tb/8R8MCgYANVinOLz2enB+Qzu4o88W/ +witKHI3D9+z/uWjp0Q4rwmr3OL4FD9vZWvL4qwbDgpfLirJ4e3UVfN1/FoytAKlk +Kykf8oDWciCIdxStt/yUpgQv78IL3vM5d9B8Qb7KCRtJ0BIXGJ7Gle3xJeuduZLe 
++F+hwI3Dpv5HPqa9o6ttJw== +-----END PRIVATE KEY----- diff --git a/pulsar-broker-common/src/test/resources/ssl/my-ca/server-ca.pem b/pulsar-broker-common/src/test/resources/ssl/my-ca/server-ca.pem new file mode 100644 index 0000000000000..df5f69298e258 --- /dev/null +++ b/pulsar-broker-common/src/test/resources/ssl/my-ca/server-ca.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDHDCCAgSgAwIBAgIUVQHD0/oi9Ca50HA7DFLYOO2wEzYwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAxMGUHVsc2FyMCAXDTIyMDExNDA0MjgwMFoYDzIxMjExMjIx +MDQyODAwWjARMQ8wDQYDVQQDEwZQdWxzYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDBcqDkMhjLd9ik//UQijqbajQP5t6dvVZNn9gODQrS9oB/URur +NzCcPWYPJZfEJlTkV8mlmgq4dBjwghpy5ALOGiERk55JPIN4cy01hQ6j7YSPFvMv +BjqZvm5dpGDNTr7GY7THegMM1wpk9EaUOm7tBOHtf6ZnANjSMcQM74RCSBt0Koqw +06CKVDCbgJ5NNE1LgwYeVQAwtQAhY8rqqQKJvCorFbq7OiisFBnz5pRBT6N4kMo1 +9LZo3Oe2F2w9eH9vacQ0NjSOCNXqal9Xl/Pwy9JgKKppwZ/3nCgRc+yfjrnkRz0f +b+llb2NpR5Ge+tNMakqelE8bDSw/5BPjRPftAgMBAAGjajBoMA4GA1UdDwEB/wQE +AwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMB0GA1UdDgQW +BBRXws5mmLbW+xOLflUyUZ0I0uN96zAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJ +KoZIhvcNAQELBQADggEBAKMklpYJIkp4icz9Ea5wWQiRXWb94lGdyCA833VHeGB2 +fKvNXj1d6lEiy26pOjhDmycroKelj70WqOsqVgi4xh4Y9sj6pwb8Q423Tu3qNO1k +qaScTar2DANSigNzqlSbLshPWQ2ZyDwkvZPuqPgHzOXekzbUGwxgCiySaQkl2mCS +mBaG3XnESwiMIKkLphEv0MAvTVaImbSRWYEQ4OECwcHXxx+14wK8NLcdDIHcSzki +8Eq24CxDOeL5QxciGMi5tylsdCpT+D/BXTKiu46yoRjXUsTLYL53yUZZIqQ3A4CV +enZ/vHhP0Ev9RcRigFTqrBm7EC3b2AUpvqgRMnPwQZo= +-----END CERTIFICATE----- diff --git a/pulsar-broker-common/src/test/resources/ssl/my-ca/server-key.pem b/pulsar-broker-common/src/test/resources/ssl/my-ca/server-key.pem new file mode 100644 index 0000000000000..a3f3a36b73c37 --- /dev/null +++ b/pulsar-broker-common/src/test/resources/ssl/my-ca/server-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDBcqDkMhjLd9ik +//UQijqbajQP5t6dvVZNn9gODQrS9oB/URurNzCcPWYPJZfEJlTkV8mlmgq4dBjw 
+ghpy5ALOGiERk55JPIN4cy01hQ6j7YSPFvMvBjqZvm5dpGDNTr7GY7THegMM1wpk +9EaUOm7tBOHtf6ZnANjSMcQM74RCSBt0Koqw06CKVDCbgJ5NNE1LgwYeVQAwtQAh +Y8rqqQKJvCorFbq7OiisFBnz5pRBT6N4kMo19LZo3Oe2F2w9eH9vacQ0NjSOCNXq +al9Xl/Pwy9JgKKppwZ/3nCgRc+yfjrnkRz0fb+llb2NpR5Ge+tNMakqelE8bDSw/ +5BPjRPftAgMBAAECggEBAJm2JsgMUo1ihn/dbnIdFCKoCgRUs7FtYCVADOJlVKN7 +AXGpFi4/JV4Qn4cLnQNcXfovE2iF9VzJy4NYLgH60YvJUVtxC8Yv0lukUVkEiDST +p9A3MTa9YVUG7xVzZwPcPVTQpzYV6lSKjpTXUTm5EKk/RvJ7itKv5plmt9x7eYFb +/JwqXo1Z6C4gfIFR85LWmrCsNUK5T9oooLz88D6+ZH3+fWlr75RDff2kqdLshMTs +N0Ov7NXcRFeruFs/IPrgTxjBMeNa2LFdYVPeeQ41L4uOI49uVBAmSn1be+THvDoj +Do+6wTEF/h6/VLoOaIFZZdHlqd4is+xcEg8gwVkCn2ECgYEAxqVvGKc9qaqEVwBx +U5Ru9OFx0NqEBvkYZRbCg1REcMFd3lqFTHvHiF3pmCp0XgLJKYuy42618IJXhj6D +Y15/p9jX0025MpnH/AdwpO6x5pv6gb/JOMnHOnq8sI3R+V6TVsv1WZj0sOj94mF0 ++Od++bQkUnSlfE4X7v+cJfo/Q8UCgYEA+Uz1yOyI9Dv1dEdBMdBA8MTriYU0uJCV +dVKzL/uC9XyguVBWu1HX0MvEKyjPRycvLB7TuQqAFLgCtC8EEuPGBpWtyXOm9Jxw +ToCfUZFuBQeMuf4vZcFgJjiEKTdKBxrvjkhyIhPR6JAy0WUr8Ry+ZtqvmG5NOEz5 +ptm1tznYngkCgYEAlckeyV8p/uqF2biKu3QcamgoU0zB6yQfAfK0fySmasNTzZtC +EhbvsOLnhgbVMiI1ny8ol5fedtlBuAchOWeDKIQ40as0r3QHuQG/LY6S9Im+zeFY +kIqNwInWB+cYYkmvHe6zNXlBYLh+4BmOgzTDqPPtw4MTWXTlVSDGlFhrJeUCgYBX +7rlS4Xt9ChkNpoRsWZROWGbr3rw1zWmqND1X01Lh28+lDZ1J/RguYXET+BUEd+G/ +oi/zuKxsomrxuxOoxgZ3FBx0TgK5jORgDCYl0zIHPB57DBkTvx123cBf+Ux3LR0K +BqubMXp8mUATc6gIJ6dRCBmfnmhGT4BPRcM+mXy6YQKBgGEGH37VABus+Oi3g1bk +qEAaUI1asRLJIfbY2ImxEroLIQAbTFuIQUsZTKpT7jJZubjYvy1Fev0LU/n7Kv2w +7ym41z70ro5uxwUBfJjnF3RtgncNcftn4b3siNzvBfKEBuhegMeS5YAbBIwABUpR +4mVpm9BLOiX4yENIT6JdUQFc +-----END PRIVATE KEY----- diff --git a/pulsar-broker-shaded/pom.xml b/pulsar-broker-shaded/pom.xml index bf1e78bba0249..2f759e0371eff 100644 --- a/pulsar-broker-shaded/pom.xml +++ b/pulsar-broker-shaded/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
diff --git a/pulsar-broker/pom.xml b/pulsar-broker/pom.xml index fd11162d8ea3b..b26d74265dba4 100644 --- a/pulsar-broker/pom.xml +++ b/pulsar-broker/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. @@ -312,6 +312,11 @@ hppc + + org.roaringbitmap + RoaringBitmap + + ${project.groupId} pulsar-functions-api-examples @@ -568,6 +573,14 @@ test-jar test + + + ${project.groupId} + pulsar-metadata + ${project.version} + test-jar + test + diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/PulsarStandalone.java b/pulsar-broker/src/main/java/org/apache/pulsar/PulsarStandalone.java index ded8addd29d56..aaafd65755b38 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/PulsarStandalone.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/PulsarStandalone.java @@ -301,7 +301,7 @@ public void start() throws Exception { broker.start(); final String cluster = config.getClusterName(); - final AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(config); + final AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(config, "pulsar"); if (!config.isTlsEnabled()) { checkArgument(config.getWebServicePort().isPresent(), "webServicePort must be present"); checkArgument(internalListener.getBrokerServiceUrl() != null, diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/BookKeeperClientFactoryImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/BookKeeperClientFactoryImpl.java index 996bf225892af..ce91ecf907cb2 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/BookKeeperClientFactoryImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/BookKeeperClientFactoryImpl.java @@ -29,7 +29,6 @@ import java.io.IOException; import java.util.Map; import java.util.Optional; -import java.util.Properties; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import lombok.extern.slf4j.Slf4j; @@ -42,6 +41,7 
@@ import org.apache.bookkeeper.stats.NullStatsLogger; import org.apache.bookkeeper.stats.StatsLogger; import org.apache.commons.lang3.StringUtils; +import org.apache.pulsar.client.internal.PropertiesUtils; import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.zookeeper.ZkBookieRackAffinityMapping; @@ -81,7 +81,6 @@ public BookKeeper create(ServiceConfiguration conf, ZooKeeper zkClient, EventLoo try { return BookKeeper.forConfig(bkConf) .allocator(PulsarByteBufAllocator.DEFAULT) - .setZookeeper(zkClient) .eventLoopGroup(eventLoopGroup) .statsLogger(statsLogger) .build(); @@ -148,15 +147,11 @@ ClientConfiguration createBkClientConfiguration(ServiceConfiguration conf) { conf.getBookkeeperClientGetBookieInfoIntervalSeconds(), TimeUnit.SECONDS); bkConf.setGetBookieInfoRetryIntervalSeconds( conf.getBookkeeperClientGetBookieInfoRetryIntervalSeconds(), TimeUnit.SECONDS); - Properties allProps = conf.getProperties(); - allProps.forEach((key, value) -> { - String sKey = key.toString(); - if (sKey.startsWith("bookkeeper_") && value != null) { - String bkExtraConfigKey = sKey.substring(11); - log.info("Extra BookKeeper client configuration {}, setting {}={}", sKey, bkExtraConfigKey, value); - bkConf.setProperty(bkExtraConfigKey, value); - } - }); + PropertiesUtils.filterAndMapProperties(conf.getProperties(), "bookkeeper_") + .forEach((key, value) -> { + log.info("Applying BookKeeper client configuration setting {}={}", key, value); + bkConf.setProperty(key, value); + }); return bkConf; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/ManagedLedgerClientFactory.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/ManagedLedgerClientFactory.java index b615628c08fa1..174933b9cbd9c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/ManagedLedgerClientFactory.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/ManagedLedgerClientFactory.java 
@@ -71,6 +71,7 @@ public void initialize(ServiceConfiguration conf, MetadataStoreExtended metadata managedLedgerFactoryConfig.setCursorPositionFlushSeconds(conf.getManagedLedgerCursorPositionFlushSeconds()); managedLedgerFactoryConfig.setManagedLedgerInfoCompressionType(conf.getManagedLedgerInfoCompressionType()); managedLedgerFactoryConfig.setStatsPeriodSeconds(conf.getManagedLedgerStatsPeriodSeconds()); + managedLedgerFactoryConfig.setManagedCursorInfoCompressionType(conf.getManagedCursorInfoCompressionType()); Configuration configuration = new ClientConfiguration(); if (conf.isBookkeeperClientExposeStatsToPrometheus()) { @@ -83,7 +84,8 @@ public void initialize(ServiceConfiguration conf, MetadataStoreExtended metadata statsProvider.start(configuration); StatsLogger statsLogger = statsProvider.getStatsLogger("pulsar_managedLedger_client"); - this.defaultBkClient = bookkeeperProvider.create(conf, zkClient, eventLoopGroup, Optional.empty(), null); + this.defaultBkClient = + bookkeeperProvider.create(conf, zkClient, eventLoopGroup, Optional.empty(), null, statsLogger); BookkeeperFactoryForCustomEnsemblePlacementPolicy bkFactory = ( EnsemblePlacementPolicyConfig ensemblePlacementPolicyConfig) -> { @@ -94,7 +96,7 @@ public void initialize(ServiceConfiguration conf, MetadataStoreExtended metadata try { return bookkeeperProvider.create(conf, zkClient, eventLoopGroup, Optional.ofNullable(ensemblePlacementPolicyConfig.getPolicyClass()), - ensemblePlacementPolicyConfig.getProperties()); + ensemblePlacementPolicyConfig.getProperties(), statsLogger); } catch (Exception e) { log.error("Failed to initialize bk-client for policy {}, properties {}", ensemblePlacementPolicyConfig.getPolicyClass(), diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java index 14f5003f379b6..c10e6acc2a8f5 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java +++ 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/PulsarService.java @@ -22,7 +22,7 @@ import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.commons.lang3.StringUtils.isNotBlank; import static org.apache.pulsar.broker.resourcegroup.ResourceUsageTransportManager.DISABLE_RESOURCE_USAGE_TRANSPORT_MANAGER; -import static org.apache.pulsar.transaction.coordinator.impl.MLTransactionLogImpl.TRANSACTION_LOG_PREFIX; +import static org.apache.pulsar.common.naming.TopicName.TRANSACTION_COORDINATOR_LOG; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; @@ -89,7 +89,6 @@ import org.apache.pulsar.broker.loadbalance.LoadReportUpdaterTask; import org.apache.pulsar.broker.loadbalance.LoadResourceQuotaUpdaterTask; import org.apache.pulsar.broker.loadbalance.LoadSheddingTask; -import org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.protocol.ProtocolHandlers; import org.apache.pulsar.broker.resourcegroup.ResourceGroupService; @@ -127,12 +126,14 @@ import org.apache.pulsar.client.api.transaction.TransactionBufferClient; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import org.apache.pulsar.client.impl.conf.ConfigurationDataUtils; +import org.apache.pulsar.client.internal.PropertiesUtils; +import org.apache.pulsar.client.util.ExecutorProvider; import org.apache.pulsar.common.conf.InternalConfigurationData; import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; import org.apache.pulsar.common.configuration.VipStatus; import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.NamespaceName; -import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; import 
org.apache.pulsar.common.policies.data.ClusterDataImpl; import org.apache.pulsar.common.policies.data.OffloadPoliciesImpl; @@ -245,7 +246,7 @@ public class PulsarService implements AutoCloseable, ShutdownService { private AdditionalServlets brokerAdditionalServlets; // packages management service - private PackagesManagement packagesManagement; + private Optional packagesManagement = Optional.empty(); private PrometheusMetricsServlet metricsServlet; private List pendingMetricsProviders; @@ -259,7 +260,7 @@ public class PulsarService implements AutoCloseable, ShutdownService { private PulsarResources pulsarResources; private TransactionPendingAckStoreProvider transactionPendingAckStoreProvider; - private final ScheduledExecutorService transactionReplayExecutor; + private final ExecutorProvider transactionExecutorProvider; public enum State { Init, Started, Closing, Closed @@ -272,6 +273,8 @@ public enum State { private volatile CompletableFuture closeFuture; // key is listener name , value is pulsar address and pulsar ssl address private Map advertisedListeners; + private NamespaceName heartbeatNamespaceV2; + private NamespaceName heartbeatNamespaceV1; public PulsarService(ServiceConfiguration config) { this(config, Optional.empty(), (exitCode) -> { @@ -315,11 +318,10 @@ public PulsarService(ServiceConfiguration config, new DefaultThreadFactory("zk-cache-callback")); if (config.isTransactionCoordinatorEnabled()) { - this.transactionReplayExecutor = Executors.newScheduledThreadPool( - config.getNumTransactionReplayThreadPoolSize(), - new DefaultThreadFactory("transaction-replay")); + this.transactionExecutorProvider = new ExecutorProvider(this.getConfiguration() + .getNumTransactionReplayThreadPoolSize(), "pulsar-transaction-executor"); } else { - this.transactionReplayExecutor = null; + this.transactionExecutorProvider = null; } this.ioEventLoopGroup = EventLoopUtil.newEventLoopGroup(config.getNumIOThreads(), config.isEnableBusyWait(), @@ -422,7 +424,16 @@ public 
CompletableFuture closeAsync() { List> asyncCloseFutures = new ArrayList<>(); if (this.brokerService != null) { - asyncCloseFutures.add(this.brokerService.closeAsync()); + CompletableFuture brokerCloseFuture = this.brokerService.closeAsync(); + if (this.transactionMetadataStoreService != null) { + asyncCloseFutures.add(brokerCloseFuture.whenComplete((__, ___) -> { + // close transactionMetadataStoreService after the broker has been closed + this.transactionMetadataStoreService.close(); + this.transactionMetadataStoreService = null; + })); + } else { + asyncCloseFutures.add(brokerCloseFuture); + } this.brokerService = null; } @@ -495,8 +506,8 @@ public CompletableFuture closeAsync() { configurationMetadataStore.close(); } - if (transactionReplayExecutor != null) { - transactionReplayExecutor.shutdown(); + if (transactionExecutorProvider != null) { + transactionExecutorProvider.shutdownNow(); } ioEventLoopGroup.shutdownGracefully(); @@ -583,6 +594,7 @@ public void start() throws PulsarServerException { LOG.info("Starting Pulsar Broker service; version: '{}'", (brokerVersion != null ? 
brokerVersion : "unknown")); LOG.info("Git Revision {}", PulsarVersion.getGitSha()); + LOG.info("Git Branch {}", PulsarVersion.getGitBranch()); LOG.info("Built by {} on {} at {}", PulsarVersion.getBuildUser(), PulsarVersion.getBuildHost(), @@ -674,6 +686,8 @@ config, localMetadataStore, getZkClient(), this.addWebServerHandlers(webService, metricsServlet, this.config); this.webService.start(); + heartbeatNamespaceV1 = NamespaceService.getHeartbeatNamespace(this.advertisedAddress, this.config); + heartbeatNamespaceV2 = NamespaceService.getHeartbeatNamespaceV2(this.advertisedAddress, this.config); // Refresh addresses and update configuration, since the port might have been dynamically assigned if (config.getBrokerServicePort().equals(Optional.of(0))) { @@ -687,6 +701,7 @@ config, localMetadataStore, getZkClient(), this.brokerServiceUrl = brokerUrl(config); this.brokerServiceUrlTls = brokerUrlTls(config); + if (null != this.webSocketService) { ClusterDataImpl clusterData = ClusterDataImpl.builder() .serviceUrl(webServiceAddress) @@ -718,7 +733,9 @@ config, localMetadataStore, getZkClient(), this.transactionBufferSnapshotService = new SystemTopicBaseTxnBufferSnapshotService(getClient()); this.transactionTimer = new HashedWheelTimer(new DefaultThreadFactory("pulsar-transaction-timer")); - transactionBufferClient = TransactionBufferClientImpl.create(getClient(), transactionTimer); + transactionBufferClient = TransactionBufferClientImpl.create(this, transactionTimer, + config.getTransactionBufferClientMaxConcurrentRequests(), + config.getTransactionBufferClientOperationTimeoutInMills()); transactionMetadataStoreService = new TransactionMetadataStoreService(TransactionMetadataStoreProvider .newProvider(config.getTransactionMetadataStoreProviderClassName()), this, @@ -1025,7 +1042,7 @@ protected void startLoadManagementService() throws PulsarServerException { if (config.isLoadBalancerEnabled()) { LOG.info("Starting load balancer"); if (this.loadReportTask == null) { - long 
loadReportMinInterval = LoadManagerShared.LOAD_REPORT_UPDATE_MINIMUM_INTERVAL; + long loadReportMinInterval = config.getLoadBalancerReportUpdateMinIntervalMillis(); this.loadReportTask = this.loadManagerExecutor.scheduleAtFixedRate( new LoadReportUpdaterTask(loadManager), loadReportMinInterval, loadReportMinInterval, TimeUnit.MILLISECONDS); @@ -1195,32 +1212,37 @@ public LedgerOffloader getManagedLedgerOffloader(NamespaceName namespaceName, Of }); } - public synchronized LedgerOffloader createManagedLedgerOffloader(OffloadPoliciesImpl offloadPolicies) + public LedgerOffloader createManagedLedgerOffloader(OffloadPoliciesImpl offloadPolicies) throws PulsarServerException { try { if (StringUtils.isNotBlank(offloadPolicies.getManagedLedgerOffloadDriver())) { checkNotNull(offloadPolicies.getOffloadersDirectory(), "Offloader driver is configured to be '%s' but no offloaders directory is configured.", offloadPolicies.getManagedLedgerOffloadDriver()); - Offloaders offloaders = offloadersCache.getOrLoadOffloaders( - offloadPolicies.getOffloadersDirectory(), config.getNarExtractionDirectory()); - - LedgerOffloaderFactory offloaderFactory = offloaders.getOffloaderFactory( - offloadPolicies.getManagedLedgerOffloadDriver()); - try { - return offloaderFactory.create( - offloadPolicies, - ImmutableMap.of( - LedgerOffloader.METADATA_SOFTWARE_VERSION_KEY.toLowerCase(), PulsarVersion.getVersion(), - LedgerOffloader.METADATA_SOFTWARE_GITSHA_KEY.toLowerCase(), PulsarVersion.getGitSha() - ), - schemaStorage, - getOffloaderScheduler(offloadPolicies)); - } catch (IOException ioe) { - throw new PulsarServerException(ioe.getMessage(), ioe.getCause()); + synchronized (this) { + Offloaders offloaders = offloadersCache.getOrLoadOffloaders( + offloadPolicies.getOffloadersDirectory(), config.getNarExtractionDirectory()); + + LedgerOffloaderFactory offloaderFactory = offloaders.getOffloaderFactory( + offloadPolicies.getManagedLedgerOffloadDriver()); + try { + return offloaderFactory.create( + 
offloadPolicies, + ImmutableMap.of( + LedgerOffloader.METADATA_SOFTWARE_VERSION_KEY.toLowerCase(), + PulsarVersion.getVersion(), + LedgerOffloader.METADATA_SOFTWARE_GITSHA_KEY.toLowerCase(), + PulsarVersion.getGitSha(), + LedgerOffloader.METADATA_PULSAR_CLUSTER_NAME.toLowerCase(), + config.getClusterName() + ), + schemaStorage, getOffloaderScheduler(offloadPolicies)); + } catch (IOException ioe) { + throw new PulsarServerException(ioe.getMessage(), ioe.getCause()); + } } } else { - LOG.info("No ledger offloader configured, using NULL instance"); + LOG.debug("No ledger offloader configured, using NULL instance"); return NullLedgerOffloader.INSTANCE; } } catch (Throwable t) { @@ -1246,8 +1268,8 @@ public ScheduledExecutorService getCacheExecutor() { return cacheExecutor; } - public ScheduledExecutorService getTransactionReplayExecutor() { - return transactionReplayExecutor; + public ExecutorProvider getTransactionExecutorProvider() { + return transactionExecutorProvider; } public ScheduledExecutorService getLoadManagerExecutor() { @@ -1289,16 +1311,19 @@ public Compactor newCompactor() throws PulsarServerException { } public synchronized Compactor getCompactor() throws PulsarServerException { - return getCompactor(true); - } - - public synchronized Compactor getCompactor(boolean shouldInitialize) throws PulsarServerException { - if (this.compactor == null && shouldInitialize) { + if (this.compactor == null) { this.compactor = newCompactor(); } return this.compactor; } + // This method is used for metrics, which is allowed to as null + // Because it's no operation on the compactor, so let's remove the synchronized on this method + // to avoid unnecessary lock competition. 
+ public Compactor getNullableCompactor() { + return this.compactor; + } + protected synchronized OrderedScheduler getOffloaderScheduler(OffloadPoliciesImpl offloadPolicies) { if (this.offloaderScheduler == null) { this.offloaderScheduler = OrderedScheduler.newSchedulerBuilder() @@ -1311,13 +1336,24 @@ protected synchronized OrderedScheduler getOffloaderScheduler(OffloadPoliciesImp public synchronized PulsarClient getClient() throws PulsarServerException { if (this.client == null) { try { - ClientConfigurationData conf = new ClientConfigurationData(); - conf.setServiceUrl(this.getConfiguration().isTlsEnabled() - ? this.brokerServiceUrlTls : this.brokerServiceUrl); - conf.setTlsAllowInsecureConnection(this.getConfiguration().isTlsAllowInsecureConnection()); - conf.setTlsTrustCertsFilePath(this.getConfiguration().getTlsCertificateFilePath()); - - if (this.getConfiguration().isBrokerClientTlsEnabled()) { + ClientConfigurationData initialConf = new ClientConfigurationData(); + initialConf.setStatsIntervalSeconds(0); + + // Apply all arbitrary configuration. This must be called before setting any fields annotated as + // @Secret on the ClientConfigurationData object because of the way they are serialized. + // See https://github.com/apache/pulsar/issues/8509 for more information. + Map overrides = PropertiesUtils + .filterAndMapProperties(this.getConfiguration().getProperties(), "brokerClient_"); + ClientConfigurationData conf = + ConfigurationDataUtils.loadData(overrides, initialConf, ClientConfigurationData.class); + + boolean tlsEnabled = this.getConfiguration().isBrokerClientTlsEnabled(); + conf.setServiceUrl(tlsEnabled ? 
this.brokerServiceUrlTls : this.brokerServiceUrl); + + if (tlsEnabled) { + conf.setTlsCiphers(this.getConfiguration().getBrokerClientTlsCiphers()); + conf.setTlsProtocols(this.getConfiguration().getBrokerClientTlsProtocols()); + conf.setTlsAllowInsecureConnection(this.getConfiguration().isTlsAllowInsecureConnection()); if (this.getConfiguration().isBrokerClientTlsEnabledWithKeyStore()) { conf.setUseKeyStoreTls(true); conf.setTlsTrustStoreType(this.getConfiguration().getBrokerClientTlsTrustStoreType()); @@ -1339,8 +1375,6 @@ public synchronized PulsarClient getClient() throws PulsarServerException { this.getConfiguration().getBrokerClientAuthenticationPlugin(), this.getConfiguration().getBrokerClientAuthenticationParameters())); } - - conf.setStatsIntervalSeconds(0); this.client = new PulsarClientImpl(conf, ioEventLoopGroup); } catch (Exception e) { throw new PulsarServerException(e); @@ -1360,12 +1394,20 @@ public synchronized PulsarAdmin getAdminClient() throws PulsarServerException { + ", webServiceAddressTls: " + webServiceAddressTls + ", webServiceAddress: " + webServiceAddress); } - PulsarAdminBuilder builder = PulsarAdmin.builder().serviceHttpUrl(adminApiUrl) // - .authentication(// - conf.getBrokerClientAuthenticationPlugin(), // - conf.getBrokerClientAuthenticationParameters()); + PulsarAdminBuilder builder = PulsarAdmin.builder().serviceHttpUrl(adminApiUrl); + + // Apply all arbitrary configuration. This must be called before setting any fields annotated as + // @Secret on the ClientConfigurationData object because of the way they are serialized. + // See https://github.com/apache/pulsar/issues/8509 for more information. 
+ builder.loadConf(PropertiesUtils.filterAndMapProperties(config.getProperties(), "brokerClient_")); + + builder.authentication( + conf.getBrokerClientAuthenticationPlugin(), + conf.getBrokerClientAuthenticationParameters()); if (conf.isBrokerClientTlsEnabled()) { + builder.tlsCiphers(config.getBrokerClientTlsCiphers()) + .tlsProtocols(config.getBrokerClientTlsProtocols()); if (conf.isBrokerClientTlsEnabledWithKeyStore()) { builder.useKeyStoreTls(true) .tlsTrustStoreType(conf.getBrokerClientTlsTrustStoreType()) @@ -1411,8 +1453,8 @@ public TransactionBufferClient getTransactionBufferClient() { * Gets the broker service URL (non-TLS) associated with the internal listener. */ protected String brokerUrl(ServiceConfiguration config) { - AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(config); - return internalListener != null && internalListener.getBrokerServiceUrl() != null + AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(config, "pulsar"); + return internalListener.getBrokerServiceUrl() != null ? internalListener.getBrokerServiceUrl().toString() : null; } @@ -1424,8 +1466,8 @@ public static String brokerUrl(String host, int port) { * Gets the broker service URL (TLS) associated with the internal listener. */ public String brokerUrlTls(ServiceConfiguration config) { - AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(config); - return internalListener != null && internalListener.getBrokerServiceUrlTls() != null + AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(config, "pulsar+ssl"); + return internalListener.getBrokerServiceUrlTls() != null ? 
internalListener.getBrokerServiceUrlTls().toString() : null; } @@ -1435,7 +1477,10 @@ public static String brokerUrlTls(String host, int port) { public String webAddress(ServiceConfiguration config) { if (config.getWebServicePort().isPresent()) { - return webAddress(ServiceConfigurationUtils.getWebServiceAddress(config), getListenPortHTTP().get()); + AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(config, "http"); + return internalListener.getBrokerHttpUrl() != null + ? internalListener.getBrokerHttpUrl().toString() + : webAddress(ServiceConfigurationUtils.getWebServiceAddress(config), getListenPortHTTP().get()); } else { return null; } @@ -1447,7 +1492,10 @@ public static String webAddress(String host, int port) { public String webAddressTls(ServiceConfiguration config) { if (config.getWebServicePortTls().isPresent()) { - return webAddressTls(ServiceConfigurationUtils.getWebServiceAddress(config), getListenPortHTTPS().get()); + AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(config, "https"); + return internalListener.getBrokerHttpsUrl() != null + ? internalListener.getBrokerHttpsUrl().toString() + : webAddressTls(ServiceConfigurationUtils.getWebServiceAddress(config), getListenPortHTTPS().get()); } else { return null; } @@ -1544,16 +1592,22 @@ private void startWorkerService(AuthenticationService authenticationService, } } + public PackagesManagement getPackagesManagement() throws UnsupportedOperationException { + return packagesManagement.orElseThrow(() -> new UnsupportedOperationException("Package Management Service " + + "is not enabled in the broker.")); + } + private void startPackagesManagementService() throws IOException { // TODO: using provider to initialize the packages management service. 
- this.packagesManagement = new PackagesManagementImpl(); + PackagesManagement packagesManagementService = new PackagesManagementImpl(); + this.packagesManagement = Optional.of(packagesManagementService); PackagesStorageProvider storageProvider = PackagesStorageProvider .newProvider(config.getPackagesManagementStorageProvider()); DefaultPackagesStorageConfiguration storageConfiguration = new DefaultPackagesStorageConfiguration(); storageConfiguration.setProperty(config.getProperties()); PackagesStorage storage = storageProvider.getStorage(storageConfiguration); storage.initialize(); - packagesManagement.initialize(storage); + packagesManagementService.initialize(storage); } public Optional getListenPortHTTP() { @@ -1648,11 +1702,16 @@ public void shutdownNow() { } - private static boolean isTransactionSystemTopic(TopicName topicName) { + public static boolean isTransactionSystemTopic(TopicName topicName) { String topic = topicName.toString(); return topic.startsWith(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString()) - || topic.startsWith(TopicName.get(TopicDomain.persistent.value(), - NamespaceName.SYSTEM_NAMESPACE, TRANSACTION_LOG_PREFIX).toString()) + || topic.startsWith(TRANSACTION_COORDINATOR_LOG.toString()) + || topic.endsWith(MLPendingAckStore.PENDING_ACK_STORE_SUFFIX); + } + + public static boolean isTransactionInternalName(TopicName topicName) { + String topic = topicName.toString(); + return topic.startsWith(TRANSACTION_COORDINATOR_LOG.toString()) || topic.endsWith(MLPendingAckStore.PENDING_ACK_STORE_SUFFIX); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java index 31a87144465dc..902546958c54e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java @@ -18,25 +18,32 @@ */ 
package org.apache.pulsar.broker; +import static org.apache.pulsar.transaction.coordinator.impl.MLTransactionLogImpl.getMLTransactionLogName; import static org.apache.pulsar.transaction.coordinator.proto.TxnStatus.ABORTING; import static org.apache.pulsar.transaction.coordinator.proto.TxnStatus.COMMITTING; import com.google.common.annotations.VisibleForTesting; import io.netty.util.HashedWheelTimer; import io.netty.util.Timer; -import java.util.ArrayList; +import io.netty.util.concurrent.DefaultThreadFactory; import java.util.Collections; import java.util.Deque; import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.Semaphore; +import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.pulsar.broker.namespace.NamespaceBundleOwnershipListener; import org.apache.pulsar.broker.service.BrokerServiceException.ServiceUnitNotReadyException; -import org.apache.pulsar.broker.transaction.buffer.exceptions.UnsupportedTxnActionException; +import org.apache.pulsar.broker.transaction.exception.coordinator.TransactionCoordinatorException; import org.apache.pulsar.broker.transaction.recover.TransactionRecoverTrackerImpl; import org.apache.pulsar.broker.transaction.timeout.TransactionTimeoutTrackerFactoryImpl; import org.apache.pulsar.client.api.PulsarClientException.BrokerPersistenceException; @@ -63,7 +70,6 @@ import org.apache.pulsar.transaction.coordinator.exceptions.CoordinatorException.CoordinatorNotFoundException; import 
org.apache.pulsar.transaction.coordinator.exceptions.CoordinatorException.InvalidTxnStatusException; import org.apache.pulsar.transaction.coordinator.exceptions.CoordinatorException.TransactionMetadataStoreStateException; -import org.apache.pulsar.transaction.coordinator.impl.MLTransactionLogImpl; import org.apache.pulsar.transaction.coordinator.proto.TxnStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -84,9 +90,14 @@ public class TransactionMetadataStoreService { // one connect request open the transactionMetaStore the other request will add to the queue, when the open op // finished the request will be poll and complete the future private final ConcurrentLongHashMap>> pendingConnectRequests; + private final ExecutorService internalPinnedExecutor; private static final long HANDLE_PENDING_CONNECT_TIME_OUT = 30000L; + private final ThreadFactory threadFactory = + new DefaultThreadFactory("transaction-coordinator-thread-factory"); + + public TransactionMetadataStoreService(TransactionMetadataStoreProvider transactionMetadataStoreProvider, PulsarService pulsarService, TransactionBufferClient tbClient, HashedWheelTimer timer) { @@ -96,8 +107,10 @@ public TransactionMetadataStoreService(TransactionMetadataStoreProvider transact this.tbClient = tbClient; this.timeoutTrackerFactory = new TransactionTimeoutTrackerFactoryImpl(this, timer); this.transactionOpRetryTimer = timer; - this.tcLoadSemaphores = new ConcurrentLongHashMap<>(); - this.pendingConnectRequests = new ConcurrentLongHashMap<>(); + this.tcLoadSemaphores = ConcurrentLongHashMap.newBuilder().build(); + this.pendingConnectRequests = + ConcurrentLongHashMap.>>newBuilder().build(); + this.internalPinnedExecutor = Executors.newSingleThreadScheduledExecutor(threadFactory); } @Deprecated @@ -152,86 +165,97 @@ public boolean test(NamespaceBundle namespaceBundle) { } public CompletableFuture handleTcClientConnect(TransactionCoordinatorID tcId) { - if (stores.get(tcId) != null) { - return 
CompletableFuture.completedFuture(null); - } else { - return pulsarService.getBrokerService().checkTopicNsOwnership(TopicName - .TRANSACTION_COORDINATOR_ASSIGN.getPartition((int) tcId.getId()).toString()).thenCompose(v -> { - CompletableFuture completableFuture = new CompletableFuture<>(); - final Semaphore tcLoadSemaphore = this.tcLoadSemaphores - .computeIfAbsent(tcId.getId(), (id) -> new Semaphore(1)); - Deque> deque = pendingConnectRequests - .computeIfAbsent(tcId.getId(), (id) -> new ConcurrentLinkedDeque<>()); - if (tcLoadSemaphore.tryAcquire()) { - // when tcLoadSemaphore.release(), this command will acquire semaphore, so we should jude the store - // exist again. - if (stores.get(tcId) != null) { - return CompletableFuture.completedFuture(null); - } - - openTransactionMetadataStore(tcId).thenAccept((store) -> { - stores.put(tcId, store); - LOG.info("Added new transaction meta store {}", tcId); - long endTime = System.currentTimeMillis() + HANDLE_PENDING_CONNECT_TIME_OUT; - while (true) { - // prevent thread in a busy loop. 
- if (System.currentTimeMillis() < endTime) { - CompletableFuture future = deque.poll(); - if (future != null) { - // complete queue request future - future.complete(null); - } else { - break; - } - } else { - deque.clear(); - break; - } + CompletableFuture completableFuture = new CompletableFuture<>(); + internalPinnedExecutor.execute(() -> { + if (stores.get(tcId) != null) { + completableFuture.complete(null); + } else { + pulsarService.getBrokerService().checkTopicNsOwnership(TopicName + .TRANSACTION_COORDINATOR_ASSIGN.getPartition((int) tcId.getId()).toString()) + .thenRun(() -> internalPinnedExecutor.execute(() -> { + final Semaphore tcLoadSemaphore = this.tcLoadSemaphores + .computeIfAbsent(tcId.getId(), (id) -> new Semaphore(1)); + Deque> deque = pendingConnectRequests + .computeIfAbsent(tcId.getId(), (id) -> new ConcurrentLinkedDeque<>()); + if (tcLoadSemaphore.tryAcquire()) { + // when tcLoadSemaphore.release(), this command will acquire semaphore, + // so we should jude the store exist again. + if (stores.get(tcId) != null) { + completableFuture.complete(null); + tcLoadSemaphore.release(); + return; } - completableFuture.complete(null); - tcLoadSemaphore.release(); - }).exceptionally(e -> { - completableFuture.completeExceptionally(e.getCause()); - // release before handle request queue, in order to client reconnect infinite loop - tcLoadSemaphore.release(); - long endTime = System.currentTimeMillis() + HANDLE_PENDING_CONNECT_TIME_OUT; - while (true) { - // prevent thread in a busy loop. 
- if (System.currentTimeMillis() < endTime) { - CompletableFuture future = deque.poll(); - if (future != null) { - // this means that this tc client connection connect fail - future.completeExceptionally(e); + openTransactionMetadataStore(tcId).thenAccept((store) -> internalPinnedExecutor.execute(() -> { + stores.put(tcId, store); + LOG.info("Added new transaction meta store {}", tcId); + long endTime = System.currentTimeMillis() + HANDLE_PENDING_CONNECT_TIME_OUT; + while (true) { + // prevent thread in a busy loop. + if (System.currentTimeMillis() < endTime) { + CompletableFuture future = deque.poll(); + if (future != null) { + // complete queue request future + future.complete(null); + } else { + break; + } } else { + deque.clear(); break; } - } else { - deque.clear(); - break; } + + completableFuture.complete(null); + tcLoadSemaphore.release(); + })).exceptionally(e -> { + internalPinnedExecutor.execute(() -> { + completableFuture.completeExceptionally(e.getCause()); + // release before handle request queue, + //in order to client reconnect infinite loop + tcLoadSemaphore.release(); + long endTime = System.currentTimeMillis() + HANDLE_PENDING_CONNECT_TIME_OUT; + while (true) { + // prevent thread in a busy loop. + if (System.currentTimeMillis() < endTime) { + CompletableFuture future = deque.poll(); + if (future != null) { + // this means that this tc client connection connect fail + future.completeExceptionally(e); + } else { + break; + } + } else { + deque.clear(); + break; + } + } + LOG.error("Add transaction metadata store with id {} error", tcId.getId(), e); + }); + return null; + }); + } else { + // only one command can open transaction metadata store, + // other will be added to the deque, when the op of openTransactionMetadataStore finished + // then handle the requests witch in the queue + deque.add(completableFuture); + if (LOG.isDebugEnabled()) { + LOG.debug("Handle tc client connect added into pending queue! 
tcId : {}", tcId.toString()); } - LOG.error("Add transaction metadata store with id {} error", tcId.getId(), e); - return null; - }); - } else { - // only one command can open transaction metadata store, - // other will be added to the deque, when the op of openTransactionMetadataStore finished - // then handle the requests witch in the queue - deque.add(completableFuture); - if (LOG.isDebugEnabled()) { - LOG.debug("Handle tc client connect added into pending queue! tcId : {}", tcId.toString()); } - } - return completableFuture; - }); - } + })).exceptionally(ex -> { + Throwable realCause = FutureUtil.unwrapCompletionException(ex); + completableFuture.completeExceptionally(realCause); + return null; + }); + } + }); + return completableFuture; } public CompletableFuture openTransactionMetadataStore(TransactionCoordinatorID tcId) { return pulsarService.getBrokerService() - .getManagedLedgerConfig(TopicName.get(MLTransactionLogImpl - .TRANSACTION_LOG_PREFIX + tcId)).thenCompose(v -> { + .getManagedLedgerConfig(getMLTransactionLogName(tcId)).thenCompose(v -> { TransactionTimeoutTracker timeoutTracker = timeoutTrackerFactory.newTracker(tcId); TransactionRecoverTracker recoverTracker = new TransactionRecoverTrackerImpl(TransactionMetadataStoreService.this, @@ -321,7 +345,13 @@ public CompletableFuture updateTxnStatus(TxnID txnId, TxnStatus newStatus, } public CompletableFuture endTransaction(TxnID txnID, int txnAction, boolean isTimeout) { - CompletableFuture completableFuture = new CompletableFuture<>(); + CompletableFuture future = new CompletableFuture<>(); + endTransaction(txnID, txnAction, isTimeout, future); + return future; + } + + public void endTransaction(TxnID txnID, int txnAction, boolean isTimeout, + CompletableFuture future) { TxnStatus newStatus; switch (txnAction) { case TxnAction.COMMIT_VALUE: @@ -331,95 +361,68 @@ public CompletableFuture endTransaction(TxnID txnID, int txnAction, boolea newStatus = ABORTING; break; default: - 
UnsupportedTxnActionException exception = - new UnsupportedTxnActionException(txnID, txnAction); + TransactionCoordinatorException.UnsupportedTxnActionException exception = + new TransactionCoordinatorException.UnsupportedTxnActionException(txnID, txnAction); LOG.error(exception.getMessage()); - completableFuture.completeExceptionally(exception); - return completableFuture; + future.completeExceptionally(exception); + return; } - - getTxnMeta(txnID).thenAccept(txnMeta -> { - TxnStatus txnStatus = txnMeta.status(); - if (txnStatus == TxnStatus.OPEN) { - updateTxnStatus(txnID, newStatus, TxnStatus.OPEN, isTimeout).thenAccept(v -> - endTxnInTransactionBuffer(txnID, txnAction).thenAccept(a -> - completableFuture.complete(null)).exceptionally(e -> { - if (!isRetryableException(e.getCause())) { - LOG.error("EndTxnInTransactionBuffer fail! TxnId : {}, " - + "TxnAction : {}", txnID, txnAction, e); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("EndTxnInTransactionBuffer retry! TxnId : {}, " - + "TxnAction : {}", txnID, txnAction, e); - } - transactionOpRetryTimer.newTimeout(timeout -> - endTransaction(txnID, txnAction, isTimeout), - endTransactionRetryIntervalTime, TimeUnit.MILLISECONDS); - - } - completableFuture.completeExceptionally(e); - return null; - })).exceptionally(e -> { - if (!isRetryableException(e.getCause())) { - LOG.error("EndTransaction UpdateTxnStatus fail! TxnId : {}, " - + "TxnAction : {}", txnID, txnAction, e); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("EndTransaction UpdateTxnStatus op retry! 
TxnId : {}, " - + "TxnAction : {}", txnID, txnAction, e); - } - transactionOpRetryTimer.newTimeout(timeout -> endTransaction(txnID, txnAction, isTimeout), - endTransactionRetryIntervalTime, TimeUnit.MILLISECONDS); - + getTxnMeta(txnID) + .thenCompose(txnMeta -> { + if (txnMeta.status() == TxnStatus.OPEN) { + return updateTxnStatus(txnID, newStatus, TxnStatus.OPEN, isTimeout) + .thenCompose(__ -> endTxnInTransactionBuffer(txnID, txnAction)); + } + return fakeAsyncCheckTxnStatus(txnMeta.status(), txnAction, txnID, newStatus) + .thenCompose(__ -> endTxnInTransactionBuffer(txnID, txnAction)); + }).whenComplete((__, ex)-> { + if (ex == null) { + future.complete(null); + return; + } + Throwable realCause = FutureUtil.unwrapCompletionException(ex); + if (!isRetryableException(realCause)) { + LOG.error("End transaction fail! TxnId : {}, " + + "TxnAction : {}", txnID, txnAction, realCause); + future.completeExceptionally(ex); + return; } - completableFuture.completeExceptionally(e); - return null; - }); - } else { - if ((txnStatus == COMMITTING && txnAction == TxnAction.COMMIT.getValue()) - || (txnStatus == ABORTING && txnAction == TxnAction.ABORT.getValue())) { - endTxnInTransactionBuffer(txnID, txnAction).thenAccept(k -> - completableFuture.complete(null)).exceptionally(e -> { - if (isRetryableException(e.getCause())) { - if (LOG.isDebugEnabled()) { - LOG.debug("EndTxnInTransactionBuffer retry! TxnId : {}, " - + "TxnAction : {}", txnID, txnAction, e); - } - transactionOpRetryTimer.newTimeout(timeout -> - endTransaction(txnID, txnAction, isTimeout), - endTransactionRetryIntervalTime, TimeUnit.MILLISECONDS); - } else { - LOG.error("EndTxnInTransactionBuffer fail! TxnId : {}, " - + "TxnAction : {}", txnID, txnAction, e); - } - completableFuture.completeExceptionally(e); - return null; - }); - } else { if (LOG.isDebugEnabled()) { - LOG.debug("EndTxnInTransactionBuffer op retry! TxnId : {}, TxnAction : {}", txnID, txnAction); + LOG.debug("EndTxnInTransactionBuffer retry! 
TxnId : {}, " + + "TxnAction : {}", txnID, txnAction, realCause); } - completableFuture.completeExceptionally(new InvalidTxnStatusException(txnID, newStatus, txnStatus)); - } - } - }).exceptionally(e -> { - if (isRetryableException(e.getCause())) { - if (LOG.isDebugEnabled()) { - LOG.debug("End transaction op retry! TxnId : {}, TxnAction : {}", txnID, txnAction, e); - } - transactionOpRetryTimer.newTimeout(timeout -> endTransaction(txnID, txnAction, isTimeout), - endTransactionRetryIntervalTime, TimeUnit.MILLISECONDS); + transactionOpRetryTimer.newTimeout(timeout -> + endTransaction(txnID, txnAction, isTimeout, future), + endTransactionRetryIntervalTime, TimeUnit.MILLISECONDS); + }); + } + + private CompletionStage fakeAsyncCheckTxnStatus(TxnStatus txnStatus, int txnAction, + TxnID txnID, TxnStatus expectStatus) { + boolean isLegal; + switch (txnStatus) { + case COMMITTING: + isLegal = (txnAction == TxnAction.COMMIT.getValue()); + break; + case ABORTING: + isLegal = (txnAction == TxnAction.ABORT.getValue()); + break; + default: + isLegal = false; + } + if (!isLegal) { + if (LOG.isDebugEnabled()) { + LOG.debug("EndTxnInTransactionBuffer op retry! 
TxnId : {}, TxnAction : {}", txnID, txnAction); } - completableFuture.completeExceptionally(e); - return null; - }); - return completableFuture; + return FutureUtil.failedFuture( + new InvalidTxnStatusException(txnID, expectStatus, txnStatus)); + } + return CompletableFuture.completedFuture(null); } // when managedLedger fence will remove this tc and reload public void handleOpFail(Throwable e, TransactionCoordinatorID tcId) { - if (e.getCause() instanceof ManagedLedgerException.ManagedLedgerFencedException - || e instanceof ManagedLedgerException.ManagedLedgerFencedException) { + if (e instanceof ManagedLedgerException.ManagedLedgerFencedException) { removeTransactionMetadataStore(tcId); } } @@ -445,59 +448,42 @@ public void endTransactionForTimeout(TxnID txnID) { } private CompletableFuture endTxnInTransactionBuffer(TxnID txnID, int txnAction) { - CompletableFuture resultFuture = new CompletableFuture<>(); - List> completableFutureList = new ArrayList<>(); - this.getTxnMeta(txnID).whenComplete((txnMeta, throwable) -> { - if (throwable != null) { - resultFuture.completeExceptionally(throwable); - return; - } - long lowWaterMark = getLowWaterMark(txnID); - - txnMeta.ackedPartitions().forEach(tbSub -> { - CompletableFuture actionFuture = new CompletableFuture<>(); - if (TxnAction.COMMIT_VALUE == txnAction) { - actionFuture = tbClient.commitTxnOnSubscription( - tbSub.getTopic(), tbSub.getSubscription(), txnID.getMostSigBits(), - txnID.getLeastSigBits(), lowWaterMark); - } else if (TxnAction.ABORT_VALUE == txnAction) { - actionFuture = tbClient.abortTxnOnSubscription( - tbSub.getTopic(), tbSub.getSubscription(), txnID.getMostSigBits(), - txnID.getLeastSigBits(), lowWaterMark); - } else { - actionFuture.completeExceptionally(new Throwable("Unsupported txnAction " + txnAction)); - } - completableFutureList.add(actionFuture); - }); - - txnMeta.producedPartitions().forEach(partition -> { - CompletableFuture actionFuture = new CompletableFuture<>(); - if 
(TxnAction.COMMIT_VALUE == txnAction) { - actionFuture = tbClient.commitTxnOnTopic(partition, txnID.getMostSigBits(), - txnID.getLeastSigBits(), lowWaterMark); - } else if (TxnAction.ABORT_VALUE == txnAction) { - actionFuture = tbClient.abortTxnOnTopic(partition, txnID.getMostSigBits(), - txnID.getLeastSigBits(), lowWaterMark); - } else { - actionFuture.completeExceptionally(new Throwable("Unsupported txnAction " + txnAction)); - } - completableFutureList.add(actionFuture); - }); - - try { - FutureUtil.waitForAll(completableFutureList).whenComplete((ignored, waitThrowable) -> { - if (waitThrowable != null) { - resultFuture.completeExceptionally(waitThrowable); - return; - } - resultFuture.complete(null); + return getTxnMeta(txnID) + .thenCompose(txnMeta -> { + long lowWaterMark = getLowWaterMark(txnID); + Stream> onSubFutureStream = txnMeta.ackedPartitions().stream().map(tbSub -> { + switch (txnAction) { + case TxnAction.COMMIT_VALUE: + return tbClient.commitTxnOnSubscription( + tbSub.getTopic(), tbSub.getSubscription(), txnID.getMostSigBits(), + txnID.getLeastSigBits(), lowWaterMark); + case TxnAction.ABORT_VALUE: + return tbClient.abortTxnOnSubscription( + tbSub.getTopic(), tbSub.getSubscription(), txnID.getMostSigBits(), + txnID.getLeastSigBits(), lowWaterMark); + default: + return FutureUtil.failedFuture( + new IllegalStateException("Unsupported txnAction " + txnAction)); + } + }); + Stream> onTopicFutureStream = + txnMeta.producedPartitions().stream().map(partition -> { + switch (txnAction) { + case TxnAction.COMMIT_VALUE: + return tbClient.commitTxnOnTopic(partition, txnID.getMostSigBits(), + txnID.getLeastSigBits(), lowWaterMark); + case TxnAction.ABORT_VALUE: + return tbClient.abortTxnOnTopic(partition, txnID.getMostSigBits(), + txnID.getLeastSigBits(), lowWaterMark); + default: + return FutureUtil.failedFuture( + new IllegalStateException("Unsupported txnAction " + txnAction)); + } + }); + return FutureUtil.waitForAll(Stream.concat(onSubFutureStream, 
onTopicFutureStream) + .collect(Collectors.toList())) + .thenCompose(__ -> endTxnInTransactionMetadataStore(txnID, txnAction)); }); - } catch (Exception e) { - resultFuture.completeExceptionally(e); - } - }); - - return resultFuture.thenCompose((future) -> endTxnInTransactionMetadataStore(txnID, txnAction)); } private static boolean isRetryableException(Throwable e) { @@ -529,4 +515,18 @@ private TransactionCoordinatorID getTcIdFromTxnId(TxnID txnId) { public Map getStores() { return Collections.unmodifiableMap(stores); } + + public synchronized void close () { + this.internalPinnedExecutor.shutdown(); + stores.forEach((tcId, metadataStore) -> { + metadataStore.closeAsync().whenComplete((v, ex) -> { + if (ex != null) { + LOG.error("Close transaction metadata store with id " + tcId, ex); + } else { + LOG.info("Removed and closed transaction meta store {}", tcId); + } + }); + }); + stores.clear(); + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/AdminResource.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/AdminResource.java index 625f4191d6c04..415ab2f97d107 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/AdminResource.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/AdminResource.java @@ -35,11 +35,13 @@ import javax.ws.rs.core.Response.Status; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.BookKeeper; +import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; -import org.apache.pulsar.broker.systopic.SystemTopicClient; +import org.apache.pulsar.broker.service.BrokerServiceException; import org.apache.pulsar.broker.web.PulsarWebResource; import org.apache.pulsar.broker.web.RestException; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.admin.internal.TopicsImpl; import 
org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespace; import org.apache.pulsar.common.naming.Constants; @@ -54,6 +56,7 @@ import org.apache.pulsar.common.policies.data.PersistencePolicies; import org.apache.pulsar.common.policies.data.Policies; import org.apache.pulsar.common.policies.data.RetentionPolicies; +import org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; import org.apache.pulsar.common.policies.data.SubscribeRate; import org.apache.pulsar.common.policies.data.TopicOperation; import org.apache.pulsar.common.policies.data.TopicPolicies; @@ -61,6 +64,7 @@ import org.apache.pulsar.common.util.Codec; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.ObjectMapperFactory; +import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.metadata.api.MetadataStoreException.AlreadyExistsException; import org.apache.pulsar.metadata.api.MetadataStoreException.BadVersionException; @@ -193,10 +197,7 @@ protected void validateGlobalNamespaceOwnership() { } catch (IllegalArgumentException e) { throw new RestException(Status.PRECONDITION_FAILED, "Tenant name or namespace is not valid"); } catch (RestException re) { - if (re.getResponse().getStatus() == Status.NOT_FOUND.getStatusCode()) { - throw new RestException(Status.NOT_FOUND, "Namespace not found"); - } - throw new RestException(Status.PRECONDITION_FAILED, "Namespace does not have any clusters configured"); + throw re; } catch (Exception e) { log.warn("Failed to validate global cluster configuration : ns={} emsg={}", namespaceName, e.getMessage()); throw new RestException(Status.SERVICE_UNAVAILABLE, "Failed to validate global cluster configuration"); @@ -285,6 +286,10 @@ protected Policies getNamespacePolicies(NamespaceName namespaceName) { BundlesData bundleData = pulsar().getNamespaceService().getNamespaceBundleFactory() .getBundles(namespaceName).getBundlesData(); policies.bundles = bundleData != null ? 
bundleData : policies.bundles; + if (policies.is_allow_auto_update_schema == null) { + // the type changed from boolean to Boolean. return broker value here for keeping compatibility. + policies.is_allow_auto_update_schema = pulsar().getConfig().isAllowAutoUpdateSchemaEnabled(); + } return policies; } catch (RestException re) { @@ -516,20 +521,7 @@ protected void validateClusterExists(String cluster) { protected Policies getNamespacePolicies(String tenant, String cluster, String namespace) { NamespaceName ns = NamespaceName.get(tenant, cluster, namespace); - try { - Policies policies = namespaceResources().getPolicies(ns) - .orElseThrow(() -> new RestException(Status.NOT_FOUND, "Namespace does not exist")); - // fetch bundles from LocalZK-policies - BundlesData bundleData = pulsar().getNamespaceService().getNamespaceBundleFactory() - .getBundles(ns).getBundlesData(); - policies.bundles = bundleData != null ? bundleData : policies.bundles; - return policies; - } catch (RestException re) { - throw re; - } catch (Exception e) { - log.error("[{}] Failed to get namespace policies {}", clientAppId(), ns, e); - throw new RestException(e); - } + return getNamespacePolicies(ns); } protected boolean isNamespaceReplicated(NamespaceName namespaceName) { @@ -593,12 +585,12 @@ protected void internalCreatePartitionedTopic(AsyncResponse asyncResponse, int n } // new create check - if (maxTopicsPerNamespace > 0 && !SystemTopicClient.isSystemTopic(topicName)) { + if (maxTopicsPerNamespace > 0 && !pulsar().getBrokerService().isSystemTopic(topicName)) { List partitionedTopics = getTopicPartitionList(TopicDomain.persistent); // exclude created system topic long topicsCount = - partitionedTopics.stream().filter(t -> !SystemTopicClient.isSystemTopic(TopicName.get(t))) - .count(); + partitionedTopics.stream().filter(t -> + !pulsar().getBrokerService().isSystemTopic(TopicName.get(t))).count(); if (topicsCount + numPartitions > maxTopicsPerNamespace) { log.error("[{}] Failed to create 
partitioned topic {}, " + "exceed maximum number of topics in namespace", clientAppId(), topicName); @@ -632,10 +624,7 @@ protected void internalCreatePartitionedTopic(AsyncResponse asyncResponse, int n return; } - List> createFutureList = new ArrayList<>(); - CompletableFuture createLocalFuture = new CompletableFuture<>(); - createFutureList.add(createLocalFuture); checkTopicExistsAsync(topicName).thenAccept(exists -> { if (exists) { log.warn("[{}] Failed to create already existing topic {}", clientAppId(), topicName); @@ -658,7 +647,13 @@ protected void internalCreatePartitionedTopic(AsyncResponse asyncResponse, int n return null; }); - FutureUtil.waitForAll(createFutureList).whenComplete((ignored, ex) -> { + List replicatedClusters = new ArrayList<>(); + if (!createLocalTopicOnly && topicName.isGlobal() && isNamespaceReplicated(namespaceName)) { + getNamespaceReplicatedClusters(namespaceName) + .stream().filter(cluster -> !cluster.equals(pulsar().getConfiguration().getClusterName())) + .forEach(replicatedClusters::add); + } + createLocalFuture.whenComplete((ignored, ex) -> { if (ex != null) { log.error("[{}] Failed to create partitions for topic {}", clientAppId(), topicName, ex.getCause()); if (ex.getCause() instanceof RestException) { @@ -669,14 +664,20 @@ protected void internalCreatePartitionedTopic(AsyncResponse asyncResponse, int n return; } - if (!createLocalTopicOnly && topicName.isGlobal() && isNamespaceReplicated(namespaceName)) { - getNamespaceReplicatedClusters(namespaceName) - .stream() - .filter(cluster -> !cluster.equals(pulsar().getConfiguration().getClusterName())) - .forEach(cluster -> createFutureList.add( - ((TopicsImpl) pulsar().getBrokerService().getClusterPulsarAdmin(cluster).topics()) + if (!replicatedClusters.isEmpty()) { + replicatedClusters.forEach(cluster -> { + pulsar().getPulsarResources().getClusterResources().getClusterAsync(cluster) + .thenAccept(clusterDataOp -> { + ((TopicsImpl) pulsar().getBrokerService() + 
.getClusterPulsarAdmin(cluster, clusterDataOp).topics()) .createPartitionedTopicAsync( - topicName.getPartitionedTopicName(), numPartitions, true))); + topicName.getPartitionedTopicName(), numPartitions, true); + }) + .exceptionally(throwable -> { + log.error("Failed to create partition topic in cluster {}.", cluster, throwable); + return null; + }); + }); } log.info("[{}] Successfully created partitions for topic {} in cluster {}", @@ -745,13 +746,32 @@ private CompletableFuture provisionPartitionedTopicPath(AsyncResponse asyn } protected void resumeAsyncResponseExceptionally(AsyncResponse asyncResponse, Throwable throwable) { - if (throwable instanceof WebApplicationException) { - asyncResponse.resume((WebApplicationException) throwable); + Throwable realCause = FutureUtil.unwrapCompletionException(throwable); + if (realCause instanceof WebApplicationException) { + asyncResponse.resume(realCause); + } else if (realCause instanceof BrokerServiceException.NotAllowedException) { + asyncResponse.resume(new RestException(Status.CONFLICT, realCause)); + } else if (realCause instanceof PulsarAdminException) { + asyncResponse.resume(new RestException(((PulsarAdminException) realCause))); } else { - asyncResponse.resume(new RestException(throwable)); + asyncResponse.resume(new RestException(realCause)); } } + protected CompletableFuture getSchemaCompatibilityStrategyAsync() { + return getNamespacePoliciesAsync(namespaceName).thenApply(policies -> { + SchemaCompatibilityStrategy schemaCompatibilityStrategy = policies.schema_compatibility_strategy; + if (SchemaCompatibilityStrategy.isUndefined(schemaCompatibilityStrategy)) { + schemaCompatibilityStrategy = SchemaCompatibilityStrategy.fromAutoUpdatePolicy( + policies.schema_auto_update_compatibility_strategy); + if (SchemaCompatibilityStrategy.isUndefined(schemaCompatibilityStrategy)) { + schemaCompatibilityStrategy = pulsar().getConfig().getSchemaCompatibilityStrategy(); + } + } + return schemaCompatibilityStrategy; + }); + 
} + @CanIgnoreReturnValue public static T checkNotNull(T reference) { return com.google.common.base.Preconditions.checkNotNull(reference); @@ -763,6 +783,12 @@ protected void checkNotNull(Object o, String errorMessage) { } } + protected boolean isManagedLedgerNotFoundException(Exception e) { + Throwable cause = e.getCause(); + return cause instanceof ManagedLedgerException.MetadataNotFoundException + || cause instanceof MetadataStoreException.NotFoundException; + } + protected void checkArgument(boolean b, String errorMessage) { if (!b) { throw new RestException(Status.BAD_REQUEST, errorMessage); @@ -781,7 +807,7 @@ protected void validatePersistencePolicies(PersistencePolicies persistence) { checkArgument( (persistence.getBookkeeperEnsemble() >= persistence.getBookkeeperWriteQuorum()) && (persistence.getBookkeeperWriteQuorum() >= persistence.getBookkeeperAckQuorum()), - String.format("Bookkeeper Ensemble (%s) >= WriteQuorum (%s) >= AckQuoru (%s)", + String.format("Bookkeeper Ensemble (%s) >= WriteQuorum (%s) >= AckQuorum (%s)", persistence.getBookkeeperEnsemble(), persistence.getBookkeeperWriteQuorum(), persistence.getBookkeeperAckQuorum())); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/BrokersBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/BrokersBase.java index 17f497c2e8fe3..973efa3df49f9 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/BrokersBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/BrokersBase.java @@ -18,14 +18,18 @@ */ package org.apache.pulsar.broker.admin.impl; +import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun; import com.google.common.collect.Maps; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; import io.swagger.annotations.ApiResponse; import io.swagger.annotations.ApiResponses; import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; import 
java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; @@ -46,9 +50,10 @@ import org.apache.pulsar.broker.loadbalance.LeaderBroker; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.service.BrokerService; +import org.apache.pulsar.broker.service.Subscription; +import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.web.PulsarWebResource; import org.apache.pulsar.broker.web.RestException; -import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; @@ -69,6 +74,7 @@ public class BrokersBase extends PulsarWebResource { private static final Logger LOG = LoggerFactory.getLogger(BrokersBase.class); private static final Duration HEALTHCHECK_READ_TIMEOUT = Duration.ofSeconds(10); + public static final String HEALTH_CHECK_TOPIC_SUFFIX = "healthcheck"; @GET @Path("/{cluster}") @@ -181,7 +187,7 @@ public void deleteDynamicConfiguration(@PathParam("configName") String configNam public Map getAllDynamicConfigurations() throws Exception { validateSuperUserAccess(); try { - return dynamicConfigurationResources().getDynamicConfiguration(); + return dynamicConfigurationResources().getDynamicConfiguration().orElseGet(Collections::emptyMap); } catch (RestException e) { LOG.error("[{}] couldn't find any configuration in zk {}", clientAppId(), e.getMessage(), e); throw e; @@ -265,7 +271,7 @@ public InternalConfigurationData getInternalConfigurationData() { @ApiResponse(code = 500, message = "Internal server error")}) public void backlogQuotaCheck(@Suspended AsyncResponse asyncResponse) { validateSuperUserAccess(); - pulsar().getBrokerService().executor().execute(()->{ + pulsar().getBrokerService().getBacklogQuotaChecker().execute(safeRun(()->{ try { 
pulsar().getBrokerService().monitorBacklogQuota(); asyncResponse.resume(Response.noContent().build()); @@ -273,7 +279,7 @@ public void backlogQuotaCheck(@Suspended AsyncResponse asyncResponse) { LOG.error("trigger backlogQuotaCheck fail", e); asyncResponse.resume(new RestException(e)); } - }); + })); } @GET @@ -292,123 +298,139 @@ public void isReady(@Suspended AsyncResponse asyncResponse) { @GET @Path("/health") - @ApiOperation(value = "Run a healthcheck against the broker") + @ApiOperation(value = "Run a healthCheck against the broker") @ApiResponses(value = { @ApiResponse(code = 200, message = "Everything is OK"), @ApiResponse(code = 403, message = "Don't have admin permission"), @ApiResponse(code = 404, message = "Cluster doesn't exist"), @ApiResponse(code = 500, message = "Internal server error")}) @ApiParam(value = "Topic Version") - public void healthcheck(@Suspended AsyncResponse asyncResponse, - @QueryParam("topicVersion") TopicVersion topicVersion) throws Exception { - String topic; - PulsarClient client; - try { - validateSuperUserAccess(); - NamespaceName heartbeatNamespace = (topicVersion == TopicVersion.V2) - ? 
- NamespaceService.getHeartbeatNamespaceV2( - pulsar().getAdvertisedAddress(), - pulsar().getConfiguration()) - : - NamespaceService.getHeartbeatNamespace( - pulsar().getAdvertisedAddress(), - pulsar().getConfiguration()); - - - topic = String.format("persistent://%s/healthcheck", heartbeatNamespace); - - LOG.info("Running healthCheck with topic={}", topic); - - client = pulsar().getClient(); - } catch (Exception e) { - LOG.error("Error getting heathcheck topic info", e); - throw new PulsarServerException(e); - } + public void healthCheck(@Suspended AsyncResponse asyncResponse, + @QueryParam("topicVersion") TopicVersion topicVersion) { + validateSuperUserAccess(); + internalRunHealthCheck(topicVersion) + .thenAccept(__ -> { + LOG.info("[{}] Successfully run health check.", clientAppId()); + asyncResponse.resume("ok"); + }).exceptionally(ex -> { + LOG.error("[{}] Fail to run health check.", clientAppId(), ex); + return handleCommonRestAsyncException(asyncResponse, ex); + }); + } - String messageStr = UUID.randomUUID().toString(); + private CompletableFuture internalRunHealthCheck(TopicVersion topicVersion) { + NamespaceName namespaceName = (topicVersion == TopicVersion.V2) + ? NamespaceService.getHeartbeatNamespaceV2(pulsar().getAdvertisedAddress(), pulsar().getConfiguration()) + : NamespaceService.getHeartbeatNamespace(pulsar().getAdvertisedAddress(), pulsar().getConfiguration()); + final String topicName = String.format("persistent://%s/%s", namespaceName, HEALTH_CHECK_TOPIC_SUFFIX); + LOG.info("[{}] Running healthCheck with topic={}", clientAppId(), topicName); + final String messageStr = UUID.randomUUID().toString(); + final String subscriptionName = "healthCheck-" + messageStr; // create non-partitioned topic manually and close the previous reader if present. 
- try { - pulsar().getBrokerService().getTopic(topic, true).get().ifPresent(t -> { - t.getSubscriptions().forEach((__, value) -> { - try { - value.deleteForcefully(); - } catch (Exception e) { - LOG.warn("Failed to delete previous subscription {} for health check", value.getName(), e); - } - }); + return pulsar().getBrokerService().getTopic(topicName, true) + .thenCompose(topicOptional -> { + if (!topicOptional.isPresent()) { + LOG.error("[{}] Fail to run health check while get topic {}. because get null value.", + clientAppId(), topicName); + throw new RestException(Status.NOT_FOUND, + String.format("Topic [%s] not found after create.", topicName)); + } + PulsarClient client; + try { + client = pulsar().getClient(); + } catch (PulsarServerException e) { + LOG.error("[{}] Fail to run health check while get client.", clientAppId()); + throw new RestException(e); + } + CompletableFuture resultFuture = new CompletableFuture<>(); + client.newProducer(Schema.STRING).topic(topicName).createAsync() + .thenCompose(producer -> client.newReader(Schema.STRING).topic(topicName) + .subscriptionName(subscriptionName) + .startMessageId(MessageId.latest) + .createAsync().exceptionally(createException -> { + producer.closeAsync().exceptionally(ex -> { + LOG.error("[{}] Close producer fail while heath check.", clientAppId()); + return null; + }); + throw FutureUtil.wrapToCompletionException(createException); + }).thenCompose(reader -> producer.sendAsync(messageStr) + .thenCompose(__ -> healthCheckRecursiveReadNext(reader, messageStr)) + .whenComplete((__, ex) -> { + closeAndReCheck(producer, reader, topicOptional.get(), subscriptionName) + .whenComplete((unused, innerEx) -> { + if (ex != null) { + resultFuture.completeExceptionally(ex); + } else { + resultFuture.complete(null); + } + }); + } + )) + ).exceptionally(ex -> { + resultFuture.completeExceptionally(ex); + return null; + }); + return resultFuture; }); - } catch (Exception e) { - LOG.warn("Failed to try to delete 
subscriptions for health check", e); - } - - CompletableFuture> producerFuture = - client.newProducer(Schema.STRING).topic(topic).createAsync(); - CompletableFuture> readerFuture = client.newReader(Schema.STRING) - .topic(topic).startMessageId(MessageId.latest).createAsync(); - - CompletableFuture completePromise = new CompletableFuture<>(); + } - CompletableFuture.allOf(producerFuture, readerFuture).whenComplete( - (ignore, exception) -> { - if (exception != null) { - completePromise.completeExceptionally(exception); + /** + * Close producer and reader and then to re-check if this operation is success. + * + * Re-check + * - Producer: If close fails we will print error log to notify user. + * - Consumer: If close fails we will force delete subscription. + * + * @param producer Producer + * @param reader Reader + * @param topic Topic + * @param subscriptionName Subscription name + */ + private CompletableFuture closeAndReCheck(Producer producer, Reader reader, + Topic topic, String subscriptionName) { + // no matter exception or success, we still need to + // close producer/reader + CompletableFuture producerFuture = producer.closeAsync(); + CompletableFuture readerFuture = reader.closeAsync(); + List> futures = new ArrayList<>(2); + futures.add(producerFuture); + futures.add(readerFuture); + return FutureUtil.waitForAll(Collections.unmodifiableList(futures)) + .exceptionally(closeException -> { + if (readerFuture.isCompletedExceptionally()) { + LOG.error("[{}] Close reader fail while heath check.", clientAppId()); + Subscription subscription = + topic.getSubscription(subscriptionName); + // re-check subscription after reader close + if (subscription != null) { + LOG.warn("[{}] Force delete subscription {} " + + "when it still exists after the" + + " reader is closed.", + clientAppId(), subscription); + subscription.deleteForcefully() + .exceptionally(ex -> { + LOG.error("[{}] Force delete subscription fail" + + " while health check", + clientAppId(), ex); + return 
null; + }); + } } else { - producerFuture.thenCompose((producer) -> producer.sendAsync(messageStr)) - .whenComplete((ignore2, exception2) -> { - if (exception2 != null) { - completePromise.completeExceptionally(exception2); - } - }); - - healthcheckReadLoop(readerFuture, completePromise, messageStr); - - // timeout read loop after 10 seconds - FutureUtil.addTimeoutHandling(completePromise, - HEALTHCHECK_READ_TIMEOUT, pulsar().getExecutor(), - () -> FutureUtil.createTimeoutException("Timed out reading", getClass(), - "healthcheck(...)")); + // producer future fail. + LOG.error("[{}] Close producer fail while heath check.", clientAppId()); } + return null; }); + } - completePromise.whenComplete((ignore, exception) -> { - producerFuture.thenAccept((producer) -> { - producer.closeAsync().whenComplete((ignore2, exception2) -> { - if (exception2 != null) { - LOG.warn("Error closing producer for healthcheck", exception2); + private CompletableFuture healthCheckRecursiveReadNext(Reader reader, String content) { + return reader.readNextAsync() + .thenCompose(msg -> { + if (!Objects.equals(content, msg.getValue())) { + return healthCheckRecursiveReadNext(reader, content); } + return CompletableFuture.completedFuture(null); }); - }); - readerFuture.thenAccept((reader) -> { - reader.closeAsync().whenComplete((ignore2, exception2) -> { - if (exception2 != null) { - LOG.warn("Error closing reader for healthcheck", exception2); - } - }); - }); - if (exception != null) { - asyncResponse.resume(new RestException(exception)); - } else { - asyncResponse.resume("ok"); - } - }); - } - - private void healthcheckReadLoop(CompletableFuture> readerFuture, - CompletableFuture completablePromise, - String messageStr) { - readerFuture.thenAccept((reader) -> { - CompletableFuture> readFuture = reader.readNextAsync() - .whenComplete((m, exception) -> { - if (exception != null) { - completablePromise.completeExceptionally(exception); - } else if (m.getValue().equals(messageStr)) { - 
completablePromise.complete(null); - } else { - healthcheckReadLoop(readerFuture, completablePromise, messageStr); - } - }); - }); } private synchronized void deleteDynamicConfigurationOnZk(String configName) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java index 07c676af4854b..a91499ed81b7e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/NamespacesBase.java @@ -48,6 +48,7 @@ import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; import javax.ws.rs.core.UriBuilder; +import org.apache.commons.collections4.ListUtils; import org.apache.commons.lang.mutable.MutableObject; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.broker.PulsarServerException; @@ -58,10 +59,10 @@ import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentReplicator; import org.apache.pulsar.broker.service.persistent.PersistentTopic; -import org.apache.pulsar.broker.systopic.SystemTopicClient; import org.apache.pulsar.broker.web.RestException; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespace; import org.apache.pulsar.common.naming.NamedEntity; import org.apache.pulsar.common.naming.NamespaceBundle; import org.apache.pulsar.common.naming.NamespaceBundleFactory; @@ -95,6 +96,8 @@ import org.apache.pulsar.common.policies.data.SubscribeRate; import org.apache.pulsar.common.policies.data.SubscriptionAuthMode; import org.apache.pulsar.common.policies.data.TenantOperation; +import org.apache.pulsar.common.policies.data.TopicType; +import org.apache.pulsar.common.policies.data.ValidateResult; import 
org.apache.pulsar.common.policies.data.impl.AutoTopicCreationOverrideImpl; import org.apache.pulsar.common.policies.data.impl.DispatchRateImpl; import org.apache.pulsar.common.util.FutureUtil; @@ -162,6 +165,48 @@ protected void internalDeleteNamespace(AsyncResponse asyncResponse, boolean auth } } + protected CompletableFuture> internalGetListOfTopics(Policies policies, + CommandGetTopicsOfNamespace.Mode mode) { + switch (mode) { + case ALL: + return pulsar().getNamespaceService().getListOfPersistentTopics(namespaceName) + .thenCombine(internalGetNonPersistentTopics(policies), + (persistentTopics, nonPersistentTopics) -> + ListUtils.union(persistentTopics, nonPersistentTopics)); + case NON_PERSISTENT: + return internalGetNonPersistentTopics(policies); + case PERSISTENT: + default: + return pulsar().getNamespaceService().getListOfPersistentTopics(namespaceName); + } + } + + protected CompletableFuture> internalGetNonPersistentTopics(Policies policies) { + final List>> futures = Lists.newArrayList(); + final List boundaries = policies.bundles.getBoundaries(); + for (int i = 0; i < boundaries.size() - 1; i++) { + final String bundle = String.format("%s_%s", boundaries.get(i), boundaries.get(i + 1)); + try { + futures.add(pulsar().getAdminClient().topics() + .getListInBundleAsync(namespaceName.toString(), bundle)); + } catch (PulsarServerException e) { + throw new RestException(e); + } + } + return FutureUtil.waitForAll(futures) + .thenApply(__ -> { + final List topics = Lists.newArrayList(); + for (int i = 0; i < futures.size(); i++) { + List topicList = futures.get(i).join(); + if (topicList != null) { + topics.addAll(topicList); + } + } + return topics.stream().filter(name -> !TopicName.get(name).isPersistent()) + .collect(Collectors.toList()); + }); + } + @SuppressWarnings("deprecation") protected void internalDeleteNamespace(AsyncResponse asyncResponse, boolean authoritative) { validateTenantOperation(namespaceName.getTenant(), TenantOperation.DELETE_NAMESPACE); 
@@ -237,7 +282,7 @@ protected void internalDeleteNamespace(AsyncResponse asyncResponse, boolean auth } boolean hasNonSystemTopic = false; for (String topic : topics) { - if (!SystemTopicClient.isSystemTopic(TopicName.get(topic))) { + if (!pulsar().getBrokerService().isSystemTopic(TopicName.get(topic))) { hasNonSystemTopic = true; break; } @@ -262,82 +307,80 @@ protected void internalDeleteNamespace(AsyncResponse asyncResponse, boolean auth // remove from owned namespace map and ephemeral node from ZK final List> futures = Lists.newArrayList(); - try { - // remove system topics first. - if (!topics.isEmpty()) { - for (String topic : topics) { - pulsar().getBrokerService().getTopicIfExists(topic).whenComplete((topicOptional, ex) -> { - topicOptional.ifPresent(systemTopic -> futures.add(systemTopic.deleteForcefully())); - }); + // remove system topics first. + if (!topics.isEmpty()) { + for (String topic : topics) { + try { + futures.add(pulsar().getAdminClient().topics().deleteAsync(topic, true, true)); + } catch (Exception ex) { + log.error("[{}] Failed to delete system topic {}", clientAppId(), topic, ex); + asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR, ex)); + return; } } + } + FutureUtil.waitForAll(futures).thenCompose(__ -> { + List> deleteBundleFutures = Lists.newArrayList(); NamespaceBundles bundles = pulsar().getNamespaceService().getNamespaceBundleFactory() - .getBundles(namespaceName); + .getBundles(namespaceName); for (NamespaceBundle bundle : bundles.getBundles()) { // check if the bundle is owned by any broker, if not then we do not need to delete the bundle - if (pulsar().getNamespaceService().getOwner(bundle).isPresent()) { - futures.add(pulsar().getAdminClient().namespaces() - .deleteNamespaceBundleAsync(namespaceName.toString(), bundle.getBundleRange())); - } - } - } catch (Exception e) { - log.error("[{}] Failed to remove owned namespace {}", clientAppId(), namespaceName, e); - asyncResponse.resume(new RestException(e)); - 
return; - } - - FutureUtil.waitForAll(futures).handle((result, exception) -> { - if (exception != null) { - if (exception.getCause() instanceof PulsarAdminException) { - asyncResponse.resume(new RestException((PulsarAdminException) exception.getCause())); - return null; - } else { - log.error("[{}] Failed to remove owned namespace {}", clientAppId(), namespaceName, exception); - asyncResponse.resume(new RestException(exception.getCause())); - return null; - } - } - - try { - namespaceResources().getPartitionedTopicResources().clearPartitionedTopicMetadata(namespaceName); - - try { - pulsar().getPulsarResources().getTopicResources() - .clearDomainPersistence(namespaceName).get(); - pulsar().getPulsarResources().getTopicResources() - .clearNamespacePersistence(namespaceName).get(); - } catch (ExecutionException | InterruptedException e) { - // warn level log here since this failure has no side effect besides left a un-used metadata - // and also will not affect the re-creation of namespace - log.warn("[{}] Failed to remove managed-ledger for {}", clientAppId(), namespaceName, e); - } - - // we have successfully removed all the ownership for the namespace, the policies znode can be deleted - // now - namespaceResources().deletePolicies(namespaceName); - - try { - namespaceResources().deletePolicies(namespaceName); - } catch (NotFoundException e) { - // If the node with the modified information is not there anymore, we're already good - } - - try { - getLocalPolicies().deleteLocalPolicies(namespaceName); - } catch (NotFoundException nne) { - // If the z-node with the modified information is not there anymore, we're already good - } - } catch (Exception e) { - log.error("[{}] Failed to remove owned namespace {} from metadata", clientAppId(), namespaceName, e); - asyncResponse.resume(new RestException(e)); - return null; + deleteBundleFutures.add(pulsar().getNamespaceService().getOwnerAsync(bundle).thenCompose(ownership -> { + if (ownership.isPresent()) { + try { + 
return pulsar().getAdminClient().namespaces() + .deleteNamespaceBundleAsync(namespaceName.toString(), + bundle.getBundleRange()); + } catch (PulsarServerException e) { + throw new RestException(e); + } + } else { + return CompletableFuture.completedFuture(null); + } + })); } - + return FutureUtil.waitForAll(deleteBundleFutures); + }) + .thenCompose(__ -> internalClearZkSources()) + .thenAccept(__ -> { + log.info("[{}] Remove namespace successfully {}", clientAppId(), namespaceName); asyncResponse.resume(Response.noContent().build()); + }) + .exceptionally(ex -> { + Throwable cause = FutureUtil.unwrapCompletionException(ex); + log.error("[{}] Failed to remove namespace {}", clientAppId(), namespaceName, cause); + if (cause instanceof PulsarAdminException.ConflictException) { + log.info("[{}] There are new topics created during the namespace deletion, " + + "retry to delete the namespace again.", namespaceName); + pulsar().getExecutor().execute(() -> internalDeleteNamespace(asyncResponse, authoritative)); + } else { + resumeAsyncResponseExceptionally(asyncResponse, ex); + } return null; }); } + // clear zk-node resources for deleting namespace + protected CompletableFuture internalClearZkSources() { + // clear resource of `/namespace/{namespaceName}` for zk-node + return namespaceResources().deleteNamespaceAsync(namespaceName) + .thenCompose(ignore -> namespaceResources().getPartitionedTopicResources() + .clearPartitionedTopicMetadataAsync(namespaceName)) + // clear resource for manager-ledger z-node + .thenCompose(ignore -> pulsar().getPulsarResources().getTopicResources() + .clearDomainPersistence(namespaceName)) + .thenCompose(ignore -> pulsar().getPulsarResources().getTopicResources() + .clearNamespacePersistence(namespaceName)) + // we have successfully removed all the ownership for the namespace, the policies + // z-node can be deleted now + .thenCompose(ignore -> namespaceResources().deletePoliciesAsync(namespaceName)) + // clear z-node of local policies + 
.thenCompose(ignore -> getLocalPolicies().deleteLocalPoliciesAsync(namespaceName)) + // clear /loadbalance/bundle-data + .thenCompose(ignore -> namespaceResources().deleteBundleDataAsync(namespaceName)); + + } + @SuppressWarnings("deprecation") protected void internalDeleteNamespaceForcefully(AsyncResponse asyncResponse, boolean authoritative) { validateTenantOperation(namespaceName.getTenant(), TenantOperation.DELETE_NAMESPACE); @@ -422,7 +465,8 @@ protected void internalDeleteNamespaceForcefully(AsyncResponse asyncResponse, bo } // remove from owned namespace map and ephemeral node from ZK - final List> futures = Lists.newArrayList(); + final List> topicFutures = Lists.newArrayList(); + final List> bundleFutures = Lists.newArrayList(); try { // firstly remove all topics including system topics if (!topics.isEmpty()) { @@ -436,12 +480,12 @@ protected void internalDeleteNamespaceForcefully(AsyncResponse asyncResponse, bo String partitionedTopic = topicName.getPartitionedTopicName(); if (!partitionedTopics.contains(partitionedTopic)) { // Distinguish partitioned topic to avoid duplicate deletion of the same schema - futures.add(pulsar().getAdminClient().topics().deletePartitionedTopicAsync( + topicFutures.add(pulsar().getAdminClient().topics().deletePartitionedTopicAsync( partitionedTopic, true, true)); partitionedTopics.add(partitionedTopic); } } else { - futures.add(pulsar().getAdminClient().topics().deleteAsync( + topicFutures.add(pulsar().getAdminClient().topics().deleteAsync( topic, true, true)); nonPartitionedTopics.add(topic); } @@ -462,65 +506,62 @@ protected void internalDeleteNamespaceForcefully(AsyncResponse asyncResponse, bo + "and non-partitioned-topics:{} in namespace:{}.", partitionedTopics, nonPartitionedTopics, namespaceName); } + + final CompletableFuture topicFutureEx = + FutureUtil.waitForAll(topicFutures).handle((result, exception) -> { + if (exception != null) { + if (exception.getCause() instanceof PulsarAdminException) { + asyncResponse + 
.resume(new RestException((PulsarAdminException) exception.getCause())); + } else { + log.error("[{}] Failed to remove forcefully owned namespace {}", + clientAppId(), namespaceName, exception); + asyncResponse.resume(new RestException(exception.getCause())); + } + return exception; + } + + return null; + }); + if (topicFutureEx.join() != null) { + return; + } } + // forcefully delete namespace bundles NamespaceBundles bundles = pulsar().getNamespaceService().getNamespaceBundleFactory() .getBundles(namespaceName); for (NamespaceBundle bundle : bundles.getBundles()) { // check if the bundle is owned by any broker, if not then we do not need to delete the bundle if (pulsar().getNamespaceService().getOwner(bundle).isPresent()) { - futures.add(pulsar().getAdminClient().namespaces() + bundleFutures.add(pulsar().getAdminClient().namespaces() .deleteNamespaceBundleAsync(namespaceName.toString(), bundle.getBundleRange(), true)); } } } catch (Exception e) { - log.error("[{}] Failed to remove owned namespace {}", clientAppId(), namespaceName, e); + log.error("[{}] Failed to remove forcefully owned namespace {}", clientAppId(), namespaceName, e); asyncResponse.resume(new RestException(e)); return; } - FutureUtil.waitForAll(futures).handle((result, exception) -> { + FutureUtil.waitForAll(bundleFutures).thenCompose(__ -> internalClearZkSources()).handle((result, exception) -> { if (exception != null) { - if (exception.getCause() instanceof PulsarAdminException) { - asyncResponse.resume(new RestException((PulsarAdminException) exception.getCause())); - return null; + Throwable cause = FutureUtil.unwrapCompletionException(exception); + if (cause instanceof PulsarAdminException.ConflictException) { + log.info("[{}] There are new topics created during the namespace deletion, " + + "retry to force delete the namespace again.", namespaceName); + pulsar().getExecutor().execute(() -> + internalDeleteNamespaceForcefully(asyncResponse, authoritative)); } else { - log.error("[{}] Failed 
to remove owned namespace {}", clientAppId(), namespaceName, exception); - asyncResponse.resume(new RestException(exception.getCause())); - return null; - } - } - - try { - // remove partitioned topics znode - pulsar().getPulsarResources().getNamespaceResources().getPartitionedTopicResources() - .clearPartitionedTopicMetadata(namespaceName); - - try { - pulsar().getPulsarResources().getTopicResources().clearDomainPersistence(namespaceName).get(); - pulsar().getPulsarResources().getTopicResources().clearNamespacePersistence(namespaceName).get(); - } catch (ExecutionException | InterruptedException e) { - // warn level log here since this failure has no side effect besides left a un-used metadata - // and also will not affect the re-creation of namespace - log.warn("[{}] Failed to remove managed-ledger for {}", clientAppId(), namespaceName, e); + log.error("[{}] Failed to remove forcefully owned namespace {}", + clientAppId(), namespaceName, cause); + asyncResponse.resume(new RestException(cause)); } - - // we have successfully removed all the ownership for the namespace, the policies znode can be deleted - // now - namespaceResources().deletePolicies(namespaceName); - - try { - getLocalPolicies().deleteLocalPolicies(namespaceName); - } catch (NotFoundException nne) { - // If the z-node with the modified information is not there anymore, we're already good - } - } catch (Exception e) { - log.error("[{}] Failed to remove owned namespace {} from ZK", clientAppId(), namespaceName, e); - asyncResponse.resume(new RestException(e)); return null; } asyncResponse.resume(Response.noContent().build()); + return null; }); } @@ -830,19 +871,29 @@ protected void internalSetSubscriptionExpirationTime(Integer expirationTime) { }); } + protected AutoTopicCreationOverride internalGetAutoTopicCreation() { + validateNamespacePolicyOperation(namespaceName, PolicyName.AUTO_TOPIC_CREATION, PolicyOperation.READ); + Policies policies = getNamespacePolicies(namespaceName); + return 
policies.autoTopicCreationOverride; + } + protected void internalSetAutoTopicCreation(AsyncResponse asyncResponse, AutoTopicCreationOverride autoTopicCreationOverride) { final int maxPartitions = pulsar().getConfig().getMaxNumPartitionsPerPartitionedTopic(); validateNamespacePolicyOperation(namespaceName, PolicyName.AUTO_TOPIC_CREATION, PolicyOperation.WRITE); validatePoliciesReadOnlyAccess(); if (autoTopicCreationOverride != null) { - if (!AutoTopicCreationOverrideImpl.isValidOverride(autoTopicCreationOverride)) { + ValidateResult validateResult = AutoTopicCreationOverrideImpl.validateOverride(autoTopicCreationOverride); + if (!validateResult.isSuccess()) { throw new RestException(Status.PRECONDITION_FAILED, - "Invalid configuration for autoTopicCreationOverride"); + "Invalid configuration for autoTopicCreationOverride. the detail is " + + validateResult.getErrorInfo()); } - if (maxPartitions > 0 && autoTopicCreationOverride.getDefaultNumPartitions() > maxPartitions) { - throw new RestException(Status.NOT_ACCEPTABLE, - "Number of partitions should be less than or equal to " + maxPartitions); + if (Objects.equals(autoTopicCreationOverride.getTopicType(), TopicType.PARTITIONED.toString())) { + if (maxPartitions > 0 && autoTopicCreationOverride.getDefaultNumPartitions() > maxPartitions) { + throw new RestException(Status.NOT_ACCEPTABLE, + "Number of partitions should be less than or equal to " + maxPartitions); + } } } // Force to read the data s.t. the watch to the cache content is setup. 
@@ -902,6 +953,12 @@ protected void internalSetAutoSubscriptionCreation( }); } + protected AutoSubscriptionCreationOverride internalGetAutoSubscriptionCreation() { + validateNamespacePolicyOperation(namespaceName, PolicyName.AUTO_SUBSCRIPTION_CREATION, PolicyOperation.READ); + Policies policies = getNamespacePolicies(namespaceName); + return policies.autoSubscriptionCreationOverride; + } + protected void internalRemoveAutoSubscriptionCreation(AsyncResponse asyncResponse) { internalSetAutoSubscriptionCreation(asyncResponse, null); } @@ -1142,7 +1199,7 @@ protected void internalSplitNamespaceBundle(AsyncResponse asyncResponse, String try { nsBundle = validateNamespaceBundleOwnership(namespaceName, policies.bundles, bundleRange, - authoritative, true); + authoritative, false); } catch (Exception e) { asyncResponse.resume(e); return; @@ -1217,13 +1274,7 @@ protected PublishRate internalGetPublishRate() { validateNamespacePolicyOperation(namespaceName, PolicyName.RATE, PolicyOperation.READ); Policies policies = getNamespacePolicies(namespaceName); - PublishRate publishRate = policies.publishMaxMessageRate.get(pulsar().getConfiguration().getClusterName()); - if (publishRate != null) { - return publishRate; - } else { - throw new RestException(Status.NOT_FOUND, - "Publish-rate is not configured for cluster " + pulsar().getConfiguration().getClusterName()); - } + return policies.publishMaxMessageRate.get(pulsar().getConfiguration().getClusterName()); } @SuppressWarnings("deprecation") @@ -1727,6 +1778,12 @@ protected void internalSetSubscriptionAuthMode(SubscriptionAuthMode subscription } } + protected SubscriptionAuthMode internalGetSubscriptionAuthMode() { + validateNamespacePolicyOperation(namespaceName, PolicyName.SUBSCRIPTION_AUTH_MODE, PolicyOperation.READ); + Policies policies = getNamespacePolicies(namespaceName); + return policies.subscription_auth_mode; + } + protected void internalModifyEncryptionRequired(boolean encryptionRequired) { 
validateNamespacePolicyOperation(namespaceName, PolicyName.ENCRYPTION, PolicyOperation.WRITE); validatePoliciesReadOnlyAccess(); @@ -1745,6 +1802,12 @@ protected void internalModifyEncryptionRequired(boolean encryptionRequired) { } } + protected Boolean internalGetEncryptionRequired() { + validateNamespacePolicyOperation(namespaceName, PolicyName.ENCRYPTION, PolicyOperation.READ); + Policies policies = getNamespacePolicies(namespaceName); + return policies.encryption_required; + } + protected DelayedDeliveryPolicies internalGetDelayedDelivery() { validateNamespacePolicyOperation(namespaceName, PolicyName.DELAYED_DELIVERY, PolicyOperation.READ); return getNamespacePolicies(namespaceName).delayed_delivery_policies; @@ -1869,7 +1932,7 @@ protected List internalGetAntiAffinityNamespaces(String cluster, String return namespaces.stream().filter(ns -> { Optional policies; try { - policies = getLocalPolicies().getLocalPolicies(namespaceName); + policies = getLocalPolicies().getLocalPolicies(NamespaceName.get(ns)); } catch (Exception e) { throw new RuntimeException(e); } @@ -1912,14 +1975,14 @@ private void clearBacklog(NamespaceName nsName, String bundleRange, String subsc } for (Topic topic : topicList) { if (topic instanceof PersistentTopic - && !SystemTopicClient.isSystemTopic(TopicName.get(topic.getName()))) { + && !pulsar().getBrokerService().isSystemTopic(TopicName.get(topic.getName()))) { futures.add(((PersistentTopic) topic).clearBacklog(subscription)); } } } else { for (Topic topic : topicList) { if (topic instanceof PersistentTopic - && !SystemTopicClient.isSystemTopic(TopicName.get(topic.getName()))) { + && !pulsar().getBrokerService().isSystemTopic(TopicName.get(topic.getName()))) { futures.add(((PersistentTopic) topic).clearBacklog()); } } @@ -2351,15 +2414,8 @@ protected SchemaCompatibilityStrategy internalGetSchemaCompatibilityStrategy() { validateNamespacePolicyOperation(namespaceName, PolicyName.SCHEMA_COMPATIBILITY_STRATEGY, PolicyOperation.READ); 
Policies policies = getNamespacePolicies(namespaceName); - SchemaCompatibilityStrategy schemaCompatibilityStrategy = policies.schema_compatibility_strategy; - if (schemaCompatibilityStrategy == SchemaCompatibilityStrategy.UNDEFINED) { - schemaCompatibilityStrategy = pulsar().getConfig().getSchemaCompatibilityStrategy(); - if (schemaCompatibilityStrategy == SchemaCompatibilityStrategy.UNDEFINED) { - schemaCompatibilityStrategy = SchemaCompatibilityStrategy - .fromAutoUpdatePolicy(policies.schema_auto_update_compatibility_strategy); - } - } - return schemaCompatibilityStrategy; + + return policies.schema_compatibility_strategy; } @Deprecated @@ -2387,10 +2443,15 @@ protected void internalSetSchemaCompatibilityStrategy(SchemaCompatibilityStrateg "schemaCompatibilityStrategy"); } - protected boolean internalGetSchemaValidationEnforced() { + protected boolean internalGetSchemaValidationEnforced(boolean applied) { validateNamespacePolicyOperation(namespaceName, PolicyName.SCHEMA_COMPATIBILITY_STRATEGY, PolicyOperation.READ); - return getNamespacePolicies(namespaceName).schema_validation_enforced; + boolean schemaValidationEnforced = getNamespacePolicies(namespaceName).schema_validation_enforced; + if (!schemaValidationEnforced && applied) { + return pulsar().getConfiguration().isSchemaValidationEnforced(); + } else { + return schemaValidationEnforced; + } } protected void internalSetSchemaValidationEnforced(boolean schemaValidationEnforced) { @@ -2408,6 +2469,9 @@ protected void internalSetSchemaValidationEnforced(boolean schemaValidationEnfor protected boolean internalGetIsAllowAutoUpdateSchema() { validateNamespacePolicyOperation(namespaceName, PolicyName.SCHEMA_COMPATIBILITY_STRATEGY, PolicyOperation.READ); + if (getNamespacePolicies(namespaceName).is_allow_auto_update_schema == null) { + return pulsar().getConfig().isAllowAutoUpdateSchemaEnabled(); + } return getNamespacePolicies(namespaceName).is_allow_auto_update_schema; } @@ -2571,7 +2635,8 @@ protected 
OffloadPoliciesImpl internalGetOffloadPolicies() { protected int internalGetMaxTopicsPerNamespace() { validateNamespacePolicyOperation(namespaceName, PolicyName.MAX_TOPICS, PolicyOperation.READ); - return getNamespacePolicies(namespaceName).max_topics_per_namespace; + return getNamespacePolicies(namespaceName).max_topics_per_namespace != null + ? getNamespacePolicies(namespaceName).max_topics_per_namespace : 0; } protected void internalRemoveMaxTopicsPerNamespace() { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PackagesBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PackagesBase.java index ccb611d066d6b..8a8dcd9f03be9 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PackagesBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PackagesBase.java @@ -65,6 +65,8 @@ private Void handleError(Throwable throwable, AsyncResponse asyncResponse) { asyncResponse.resume(new RestException(Response.Status.NOT_FOUND, throwable.getMessage())); } else if (throwable instanceof WebApplicationException) { asyncResponse.resume(throwable); + } else if (throwable instanceof UnsupportedOperationException) { + asyncResponse.resume(new RestException(Response.Status.SERVICE_UNAVAILABLE, throwable.getMessage())); } else { log.error("Encountered unexpected error", throwable); asyncResponse.resume(new RestException(Response.Status.INTERNAL_SERVER_ERROR, throwable.getMessage())); @@ -116,6 +118,8 @@ protected StreamingOutput internalDownload(String type, String tenant, String na } else { throw new RestException(Response.Status.INTERNAL_SERVER_ERROR, e.getCause().getMessage()); } + } catch (UnsupportedOperationException e) { + throw new RestException(Response.Status.SERVICE_UNAVAILABLE, e.getMessage()); } }; } catch (IllegalArgumentException illegalArgumentException) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java index 0271abdc05e42..7725f1cd98c5d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.admin.impl; +import static org.apache.pulsar.broker.PulsarService.isTransactionInternalName; import static org.apache.pulsar.broker.resources.PulsarResources.DEFAULT_OPERATION_TIMEOUT_SEC; import static org.apache.pulsar.common.events.EventsTopicNames.checkTopicIsTransactionCoordinatorAssign; import com.fasterxml.jackson.core.JsonProcessingException; @@ -55,7 +56,6 @@ import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.bookkeeper.mledger.ManagedLedgerConfig; import org.apache.bookkeeper.mledger.ManagedLedgerException; -import org.apache.bookkeeper.mledger.ManagedLedgerException.MetadataNotFoundException; import org.apache.bookkeeper.mledger.ManagedLedgerInfo; import org.apache.bookkeeper.mledger.impl.ManagedLedgerFactoryImpl; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; @@ -131,6 +131,7 @@ import org.apache.pulsar.common.util.DateFormatter; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.collections.BitSetRecyclable; +import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -160,7 +161,9 @@ protected List internalGetList() { } try { - return topicResources().listPersistentTopicsAsync(namespaceName).join(); + return topicResources().listPersistentTopicsAsync(namespaceName).thenApply(topics -> + topics.stream().filter(topic -> + !isTransactionInternalName(TopicName.get(topic))).collect(Collectors.toList())).join(); } catch (Exception e) { log.error("[{}] Failed to get topics list for 
namespace {}", clientAppId(), namespaceName, e); throw new RestException(e); @@ -168,7 +171,6 @@ protected List internalGetList() { } protected List internalGetPartitionedTopicList() { - validateAdminAccessForTenant(namespaceName.getTenant()); validateNamespaceOperation(namespaceName, NamespaceOperation.GET_TOPICS); // Validate that namespace exists, throws 404 if it doesn't exist try { @@ -231,7 +233,7 @@ protected void validateAdminAndClientPermission() { validateAdminAccessForTenant(topicName.getTenant()); } catch (Exception ve) { try { - checkAuthorization(pulsar(), topicName, clientAppId(), clientAuthData()); + checkAuthorizationAsync(pulsar(), topicName, clientAppId(), clientAuthData()); } catch (RestException re) { throw re; } catch (Exception e) { @@ -243,6 +245,13 @@ protected void validateAdminAndClientPermission() { } } + protected void validateCreateTopic(TopicName topicName) { + if (isTransactionInternalName(topicName)) { + log.warn("Forbidden to create transaction internal topic: {}", topicName); + throw new RestException(Status.BAD_REQUEST, "Cannot create topic in system topic format!"); + } + } + public void validateAdminOperationOnTopic(boolean authoritative) { validateAdminAccessForTenant(topicName.getTenant()); validateTopicOwnership(topicName, authoritative); @@ -260,7 +269,7 @@ private void grantPermissions(String topicUri, String role, Set acti }); log.info("[{}] Successfully granted access for role {}: {} - topic {}", clientAppId(), role, actions, topicUri); - } catch (org.apache.pulsar.metadata.api.MetadataStoreException.NotFoundException e) { + } catch (MetadataStoreException.NotFoundException e) { log.warn("[{}] Failed to grant permissions on topic {}: Namespace does not exist", clientAppId(), topicUri); throw new RestException(Status.NOT_FOUND, "Namespace does not exist"); } catch (Exception e) { @@ -292,7 +301,7 @@ protected void internalDeleteTopicForcefully(boolean authoritative, boolean dele try { 
pulsar().getBrokerService().deleteTopic(topicName.toString(), true, deleteSchema).get(); } catch (Exception e) { - if (e.getCause() instanceof MetadataNotFoundException) { + if (isManagedLedgerNotFoundException(e)) { log.info("[{}] Topic was already not existing {}", clientAppId(), topicName, e); } else { log.error("[{}] Failed to delete topic forcefully {}", clientAppId(), topicName, e); @@ -508,9 +517,14 @@ private CompletableFuture updatePartitionInOtherCluster(int numPartitions, if (cluster.equals(pulsar().getConfig().getClusterName())) { return; } - results.add(pulsar().getBrokerService().getClusterPulsarAdmin(cluster).topics() - .updatePartitionedTopicAsync(topicName.toString(), - numPartitions, true, false)); + CompletableFuture updatePartitionTopicFuture = + pulsar().getPulsarResources().getClusterResources().getClusterAsync(cluster) + .thenApply(clusterDataOp -> + pulsar().getBrokerService().getClusterPulsarAdmin(cluster, clusterDataOp)) + .thenCompose(pulsarAdmin -> + pulsarAdmin.topics().updatePartitionedTopicAsync( + topicName.toString(), numPartitions, true, false)); + results.add(updatePartitionTopicFuture); }); return FutureUtil.waitForAll(results); } @@ -574,42 +588,72 @@ protected void internalDeletePartitionedTopic(AsyncResponse asyncResponse, boole } }); } - for (int i = 0; i < numPartitions; i++) { - TopicName topicNamePartition = topicName.getPartition(i); - try { - pulsar().getAdminClient().topics() - .deleteAsync(topicNamePartition.toString(), force) - .whenComplete((r, ex) -> { - if (ex != null) { - if (ex instanceof NotFoundException) { - // if the sub-topic is not found, the client might not have called create - // producer or it might have been deleted earlier, - //so we ignore the 404 error. 
- // For all other exception, - //we fail the delete partition method even if a single - // partition is failed to be deleted - if (log.isDebugEnabled()) { - log.debug("[{}] Partition not found: {}", clientAppId(), - topicNamePartition); + // delete authentication policies of the partitioned topic + CompletableFuture deleteAuthFuture = new CompletableFuture<>(); + pulsar().getPulsarResources().getNamespaceResources() + .setPoliciesAsync(topicName.getNamespaceObject(), p -> { + for (int i = 0; i < numPartitions; i++) { + p.auth_policies.getTopicAuthentication().remove(topicName.getPartition(i).toString()); + } + p.auth_policies.getTopicAuthentication().remove(topicName.toString()); + return p; + }).thenAccept(v -> { + log.info("Successfully delete authentication policies for partitioned topic {}", topicName); + deleteAuthFuture.complete(null); + }).exceptionally(ex -> { + if (ex.getCause() instanceof MetadataStoreException.NotFoundException) { + log.warn("Namespace policies of {} not found", topicName.getNamespaceObject()); + deleteAuthFuture.complete(null); + } else { + log.error("Failed to delete authentication policies for partitioned topic {}", + topicName, ex); + deleteAuthFuture.completeExceptionally(ex); + } + return null; + }); + + deleteAuthFuture.whenComplete((r, ex) -> { + if (ex != null) { + future.completeExceptionally(ex); + return; + } + for (int i = 0; i < numPartitions; i++) { + TopicName topicNamePartition = topicName.getPartition(i); + try { + pulsar().getAdminClient().topics() + .deleteAsync(topicNamePartition.toString(), force) + .whenComplete((r1, ex1) -> { + if (ex1 != null) { + if (ex1 instanceof NotFoundException) { + // if the sub-topic is not found, the client might not have called + // create producer or it might have been deleted earlier, + //so we ignore the 404 error. 
+ // For all other exception, + //we fail the delete partition method even if a single + // partition is failed to be deleted + if (log.isDebugEnabled()) { + log.debug("[{}] Partition not found: {}", clientAppId(), + topicNamePartition); + } + } else { + log.error("[{}] Failed to delete partition {}", clientAppId(), + topicNamePartition, ex1); + future.completeExceptionally(ex1); + return; } } else { - log.error("[{}] Failed to delete partition {}", clientAppId(), - topicNamePartition, ex); - future.completeExceptionally(ex); - return; + log.info("[{}] Deleted partition {}", clientAppId(), topicNamePartition); } - } else { - log.info("[{}] Deleted partition {}", clientAppId(), topicNamePartition); - } - if (count.decrementAndGet() == 0) { - future.complete(null); - } - }); - } catch (Exception e) { - log.error("[{}] Failed to delete partition {}", clientAppId(), topicNamePartition, e); - future.completeExceptionally(e); + if (count.decrementAndGet() == 0) { + future.complete(null); + } + }); + } catch (Exception e) { + log.error("[{}] Failed to delete partition {}", clientAppId(), topicNamePartition, e); + future.completeExceptionally(e); + } } - } + }); } else { future.complete(null); } @@ -976,7 +1020,7 @@ protected void internalDeleteTopic(boolean authoritative, boolean deleteSchema) log.error("[{}] Failed to delete topic {}", clientAppId(), topicName, t); if (t instanceof TopicBusyException) { throw new RestException(Status.PRECONDITION_FAILED, "Topic has active producers/subscriptions"); - } else if (t instanceof MetadataNotFoundException) { + } else if (isManagedLedgerNotFoundException(e)) { throw new RestException(Status.NOT_FOUND, "Topic not found"); } else { throw new RestException(t); @@ -1013,7 +1057,7 @@ protected void internalGetSubscriptions(AsyncResponse asyncResponse, boolean aut existsFutures.put(i, topicResources().persistentTopicExists(topicName.getPartition(i))); } FutureUtil.waitForAll(Lists.newArrayList(existsFutures.values())).thenApply(__ -> 
- existsFutures.entrySet().stream().filter(e -> e.getValue().join().booleanValue()) + existsFutures.entrySet().stream().filter(e -> e.getValue().join()) .map(item -> topicName.getPartition(item.getKey()).toString()) .collect(Collectors.toList()) ).thenAccept(topics -> { @@ -1080,26 +1124,25 @@ private void resumeAsyncResponse(AsyncResponse asyncResponse, Set subscr } private void internalGetSubscriptionsForNonPartitionedTopic(AsyncResponse asyncResponse, boolean authoritative) { - try { - validateTopicOwnership(topicName, authoritative); - validateTopicOperation(topicName, TopicOperation.GET_SUBSCRIPTIONS); - - Topic topic = getTopicReference(topicName); - final List subscriptions = Lists.newArrayList(); - topic.getSubscriptions().forEach((subName, sub) -> subscriptions.add(subName)); - asyncResponse.resume(subscriptions); - } catch (WebApplicationException wae) { - if (log.isDebugEnabled()) { - log.debug("[{}] Failed to get subscriptions for non-partitioned topic {}," - + " redirecting to other brokers.", - clientAppId(), topicName, wae); - } - resumeAsyncResponseExceptionally(asyncResponse, wae); - return; - } catch (Exception e) { - log.error("[{}] Failed to get list of subscriptions for {}", clientAppId(), topicName, e); - resumeAsyncResponseExceptionally(asyncResponse, e); - } + validateTopicOwnershipAsync(topicName, authoritative) + .thenCompose(__ -> validateTopicOperationAsync(topicName, TopicOperation.GET_SUBSCRIPTIONS)) + .thenCompose(__ -> getTopicReferenceAsync(topicName)) + .thenAccept(topic -> asyncResponse.resume(Lists.newArrayList(topic.getSubscriptions().keys()))) + .exceptionally(ex -> { + Throwable cause = ex.getCause(); + if (cause instanceof WebApplicationException + && ((WebApplicationException) cause).getResponse().getStatus() + == Status.TEMPORARY_REDIRECT.getStatusCode()) { + if (log.isDebugEnabled()) { + log.debug("[{}] Failed to get subscriptions for non-partitioned topic {}," + + " redirecting to other brokers.", clientAppId(), 
topicName, cause); + } + } else { + log.error("[{}] Failed to get list of subscriptions for {}", clientAppId(), topicName, cause); + } + resumeAsyncResponseExceptionally(asyncResponse, cause); + return null; + }); } protected TopicStats internalGetStats(boolean authoritative, boolean getPreciseBacklog, @@ -1441,35 +1484,38 @@ protected void internalDeleteSubscription(AsyncResponse asyncResponse, String su private void internalDeleteSubscriptionForNonPartitionedTopic(AsyncResponse asyncResponse, String subName, boolean authoritative) { - try { - validateTopicOwnership(topicName, authoritative); - validateTopicOperation(topicName, TopicOperation.UNSUBSCRIBE); - - Topic topic = getTopicReference(topicName); - Subscription sub = topic.getSubscription(subName); - if (sub == null) { - asyncResponse.resume(new RestException(Status.NOT_FOUND, "Subscription not found")); - return; - } - sub.delete().get(); - log.info("[{}][{}] Deleted subscription {}", clientAppId(), topicName, subName); - asyncResponse.resume(Response.noContent().build()); - } catch (Exception e) { - if (e.getCause() instanceof SubscriptionBusyException) { - log.error("[{}] Failed to delete subscription {} from topic {}", clientAppId(), subName, topicName, e); - asyncResponse.resume(new RestException(Status.PRECONDITION_FAILED, - "Subscription has active connected consumers")); - } else if (e instanceof WebApplicationException) { - if (log.isDebugEnabled()) { - log.debug("[{}] Failed to delete subscription from topic {}, redirecting to other brokers.", - clientAppId(), topicName, e); + validateTopicOwnershipAsync(topicName, authoritative) + .thenRun(() -> validateTopicOperation(topicName, TopicOperation.UNSUBSCRIBE)) + .thenCompose(__ -> { + Topic topic = getTopicReference(topicName); + Subscription sub = topic.getSubscription(subName); + if (sub == null) { + throw new RestException(Status.NOT_FOUND, "Subscription not found"); } - asyncResponse.resume(e); - } else { - log.error("[{}] Failed to delete 
subscription {} {}", clientAppId(), topicName, subName, e); - asyncResponse.resume(new RestException(e)); - } - } + return sub.delete(); + }).thenRun(() -> { + log.info("[{}][{}] Deleted subscription {}", clientAppId(), topicName, subName); + asyncResponse.resume(Response.noContent().build()); + }).exceptionally(e -> { + Throwable cause = e.getCause(); + if (cause instanceof SubscriptionBusyException) { + log.error("[{}] Failed to delete subscription {} from topic {}", clientAppId(), subName, + topicName, cause); + asyncResponse.resume(new RestException(Status.PRECONDITION_FAILED, + "Subscription has active connected consumers")); + } else if (cause instanceof WebApplicationException) { + if (log.isDebugEnabled() && ((WebApplicationException) cause).getResponse().getStatus() + == Status.TEMPORARY_REDIRECT.getStatusCode()) { + log.debug("[{}] Failed to delete subscription from topic {}, redirecting to other brokers.", + clientAppId(), topicName, cause); + } + asyncResponse.resume(cause); + } else { + log.error("[{}] Failed to delete subscription {} {}", clientAppId(), topicName, subName, cause); + asyncResponse.resume(new RestException(cause)); + } + return null; + }); } protected void internalDeleteSubscriptionForcefully(AsyncResponse asyncResponse, @@ -1538,33 +1584,34 @@ protected void internalDeleteSubscriptionForcefully(AsyncResponse asyncResponse, private void internalDeleteSubscriptionForNonPartitionedTopicForcefully(AsyncResponse asyncResponse, String subName, boolean authoritative) { - try { - validateTopicOwnership(topicName, authoritative); - validateTopicOperation(topicName, TopicOperation.UNSUBSCRIBE); - - Topic topic = getTopicReference(topicName); - Subscription sub = topic.getSubscription(subName); - if (sub == null) { - asyncResponse.resume(new RestException(Status.NOT_FOUND, "Subscription not found")); - return; - } - sub.deleteForcefully().get(); - log.info("[{}][{}] Deleted subscription forcefully {}", clientAppId(), topicName, subName); - 
asyncResponse.resume(Response.noContent().build()); - } catch (Exception e) { - if (e instanceof WebApplicationException) { - if (log.isDebugEnabled()) { - log.debug("[{}] Failed to delete subscription forcefully from topic {}," - + " redirecting to other brokers.", - clientAppId(), topicName, e); - } - asyncResponse.resume(e); - } else { - log.error("[{}] Failed to delete subscription forcefully {} {}", - clientAppId(), topicName, subName, e); - asyncResponse.resume(new RestException(e)); - } - } + validateTopicOwnershipAsync(topicName, authoritative) + .thenRun(() -> validateTopicOperation(topicName, TopicOperation.UNSUBSCRIBE)) + .thenCompose(__ -> { + Topic topic = getTopicReference(topicName); + Subscription sub = topic.getSubscription(subName); + if (sub == null) { + throw new RestException(Status.NOT_FOUND, "Subscription not found"); + } + return sub.deleteForcefully(); + }).thenRun(() -> { + log.info("[{}][{}] Deleted subscription forcefully {}", clientAppId(), topicName, subName); + asyncResponse.resume(Response.noContent().build()); + }).exceptionally(e -> { + Throwable cause = e.getCause(); + if (cause instanceof WebApplicationException) { + if (log.isDebugEnabled() && ((WebApplicationException) cause).getResponse().getStatus() + == Status.TEMPORARY_REDIRECT.getStatusCode()) { + log.debug("[{}] Failed to delete subscription from topic {}, redirecting to other brokers.", + clientAppId(), topicName, cause); + } + asyncResponse.resume(cause); + } else { + log.error("[{}] Failed to delete subscription forcefully {} {}", + clientAppId(), topicName, subName, cause); + asyncResponse.resume(new RestException(cause)); + } + return null; + }); } protected void internalSkipAllMessages(AsyncResponse asyncResponse, String subName, boolean authoritative) { @@ -2009,7 +2056,7 @@ protected void internalCreateSubscription(AsyncResponse asyncResponse, String su internalCreateSubscriptionForNonPartitionedTopic(asyncResponse, subscriptionName, targetMessageId, 
authoritative, replicated); } else { - boolean allowAutoTopicCreation = pulsar().getConfiguration().isAllowAutoTopicCreation(); + boolean allowAutoTopicCreation = pulsar().getBrokerService().isAllowAutoTopicCreation(topicName); getPartitionedTopicMetadataAsync(topicName, authoritative, allowAutoTopicCreation).thenAccept(partitionMetadata -> { final int numPartitions = partitionMetadata.partitions; @@ -2094,7 +2141,7 @@ private void internalCreateSubscriptionForNonPartitionedTopic( AsyncResponse asyncResponse, String subscriptionName, MessageIdImpl targetMessageId, boolean authoritative, boolean replicated) { - boolean isAllowAutoTopicCreation = pulsar().getConfiguration().isAllowAutoTopicCreation(); + boolean isAllowAutoTopicCreation = pulsar().getBrokerService().isAllowAutoTopicCreation(topicName); validateTopicOwnershipAsync(topicName, authoritative) .thenCompose(__ -> { @@ -2640,7 +2687,7 @@ protected PersistentOfflineTopicStats internalGetBacklog(boolean authoritative) // note that we do not want to load the topic and hence skip authorization check try { namespaceResources().getPolicies(namespaceName); - } catch (org.apache.pulsar.metadata.api.MetadataStoreException.NotFoundException e) { + } catch (MetadataStoreException.NotFoundException e) { log.warn("[{}] Failed to get topic backlog {}: Namespace does not exist", clientAppId(), namespaceName); throw new RestException(Status.NOT_FOUND, "Namespace does not exist"); } catch (Exception e) { @@ -3255,11 +3302,15 @@ private void internalExpireMessagesByTimestampForSinglePartition(String subName, if (subName.startsWith(topic.getReplicatorPrefix())) { String remoteCluster = PersistentReplicator.getRemoteCluster(subName); PersistentReplicator repl = (PersistentReplicator) topic.getPersistentReplicator(remoteCluster); - checkNotNull(repl); + if (repl == null) { + throw new RestException(Status.NOT_FOUND, "Replicator not found"); + } issued = repl.expireMessages(expireTimeInSeconds); } else { PersistentSubscription 
sub = topic.getSubscription(subName); - checkNotNull(sub); + if (sub == null) { + throw new RestException(Status.NOT_FOUND, "Subscription not found"); + } issued = sub.expireMessages(expireTimeInSeconds); } if (issued) { @@ -3512,46 +3563,55 @@ public static CompletableFuture getPartitionedTopicMet PulsarService pulsar, String clientAppId, String originalPrincipal, AuthenticationDataSource authenticationData, TopicName topicName) { CompletableFuture metadataFuture = new CompletableFuture<>(); - try { - // (1) authorize client - try { - checkAuthorization(pulsar, topicName, clientAppId, authenticationData); - } catch (RestException e) { - try { - validateAdminAccessForTenant(pulsar, - clientAppId, originalPrincipal, topicName.getTenant(), authenticationData); - } catch (RestException authException) { - log.warn("Failed to authorize {} on cluster {}", clientAppId, topicName.toString()); - throw new PulsarClientException(String.format("Authorization failed %s on topic %s with error %s", - clientAppId, topicName.toString(), authException.getMessage())); - } - } catch (Exception ex) { - // throw without wrapping to PulsarClientException that considers: unknown error marked as internal - // server error - log.warn("Failed to authorize {} on cluster {} with unexpected exception {}", clientAppId, - topicName.toString(), ex.getMessage(), ex); - throw ex; - } + CompletableFuture authorizationFuture = new CompletableFuture<>(); + checkAuthorizationAsync(pulsar, topicName, clientAppId, authenticationData) + .thenRun(() -> authorizationFuture.complete(null)) + .exceptionally(e -> { + Throwable throwable = FutureUtil.unwrapCompletionException(e); + if (throwable instanceof RestException) { + validateAdminAccessForTenantAsync(pulsar, + clientAppId, originalPrincipal, topicName.getTenant(), authenticationData) + .thenRun(() -> { + authorizationFuture.complete(null); + }).exceptionally(ex -> { + Throwable throwable2 = FutureUtil.unwrapCompletionException(ex); + if (throwable2 
instanceof RestException) { + log.warn("Failed to authorize {} on topic {}", clientAppId, topicName); + authorizationFuture.completeExceptionally(new PulsarClientException( + String.format("Authorization failed %s on topic %s with error %s", + clientAppId, topicName, throwable2.getMessage()))); + } else { + authorizationFuture.completeExceptionally(throwable2); + } + return null; + }); + } else { + // throw without wrapping to PulsarClientException that considers: unknown error marked as + // internal server error + log.warn("Failed to authorize {} on topic {}", clientAppId, topicName, throwable); + authorizationFuture.completeExceptionally(throwable); + } + return null; + }); - // validates global-namespace contains local/peer cluster: if peer/local cluster present then lookup can - // serve/redirect request else fail partitioned-metadata-request so, client fails while creating - // producer/consumer - checkLocalOrGetPeerReplicationCluster(pulsar, topicName.getNamespaceObject()) - .thenCompose(res -> pulsar.getBrokerService() - .fetchPartitionedTopicMetadataCheckAllowAutoCreationAsync(topicName)) - .thenAccept(metadata -> { - if (log.isDebugEnabled()) { - log.debug("[{}] Total number of partitions for topic {} is {}", clientAppId, topicName, - metadata.partitions); - } - metadataFuture.complete(metadata); - }).exceptionally(ex -> { - metadataFuture.completeExceptionally(ex.getCause()); - return null; - }); - } catch (Exception ex) { - metadataFuture.completeExceptionally(ex); - } + // validates global-namespace contains local/peer cluster: if peer/local cluster present then lookup can + // serve/redirect request else fail partitioned-metadata-request so, client fails while creating + // producer/consumer + authorizationFuture.thenCompose(__ -> + checkLocalOrGetPeerReplicationCluster(pulsar, topicName.getNamespaceObject())) + .thenCompose(res -> + pulsar.getBrokerService().fetchPartitionedTopicMetadataCheckAllowAutoCreationAsync(topicName)) + .thenAccept(metadata 
-> { + if (log.isDebugEnabled()) { + log.debug("[{}] Total number of partitions for topic {} is {}", clientAppId, topicName, + metadata.partitions); + } + metadataFuture.complete(metadata); + }) + .exceptionally(e -> { + metadataFuture.completeExceptionally(FutureUtil.unwrapCompletionException(e)); + return null; + }); return metadataFuture; } @@ -3593,19 +3653,18 @@ private Topic getTopicReference(TopicName topicName) { } catch (RestException e) { throw e; } catch (Exception e) { - throw new RestException(e); + if (e.getCause() instanceof NotAllowedException) { + throw new RestException(Status.BAD_REQUEST, e.getCause()); + } + throw new RestException(e.getCause() == null ? e : e.getCause()); } } private CompletableFuture getTopicReferenceAsync(TopicName topicName) { return pulsar().getBrokerService().getTopicIfExists(topicName.toString()) - .thenCompose(optTopic -> { - if (optTopic.isPresent()) { - return CompletableFuture.completedFuture(optTopic.get()); - } else { - return topicNotFoundReasonAsync(topicName); - } - }); + .thenCompose(optTopic -> optTopic + .map(CompletableFuture::completedFuture) + .orElseGet(() -> topicNotFoundReasonAsync(topicName))); } private RestException topicNotFoundReason(TopicName topicName) { @@ -4202,8 +4261,9 @@ protected void internalHandleResult(AsyncResponse asyncResponse, protected void handleTopicPolicyException(String methodName, Throwable thr, AsyncResponse asyncResponse) { Throwable cause = thr.getCause(); - if (!(cause instanceof WebApplicationException) - || !(((WebApplicationException) cause).getResponse().getStatus() == 307)) { + if (!(cause instanceof WebApplicationException) || !( + ((WebApplicationException) cause).getResponse().getStatus() == 307 + || ((WebApplicationException) cause).getResponse().getStatus() == 404)) { log.error("[{}] Failed to perform {} on topic {}", clientAppId(), methodName, topicName, cause); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java index 87fc3aecbd07d..b94b8a2d9623c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/SchemasResourceBase.java @@ -38,8 +38,7 @@ import org.apache.pulsar.broker.web.RestException; import org.apache.pulsar.client.internal.DefaultImplementation; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.policies.data.Policies; -import org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; +import org.apache.pulsar.common.policies.data.TopicOperation; import org.apache.pulsar.common.protocol.schema.DeleteSchemaResponse; import org.apache.pulsar.common.protocol.schema.GetAllVersionsSchemaResponse; import org.apache.pulsar.common.protocol.schema.GetSchemaResponse; @@ -85,7 +84,7 @@ private String getSchemaId() { } public void getSchema(boolean authoritative, AsyncResponse response) { - validateDestinationAndAdminOperation(authoritative); + validateOwnershipAndOperation(authoritative, TopicOperation.GET_METADATA); String schemaId = getSchemaId(); pulsar().getSchemaRegistryService().getSchema(schemaId).handle((schema, error) -> { handleGetSchemaResponse(response, schema, error); @@ -94,7 +93,7 @@ public void getSchema(boolean authoritative, AsyncResponse response) { } public void getSchema(boolean authoritative, String version, AsyncResponse response) { - validateDestinationAndAdminOperation(authoritative); + validateOwnershipAndOperation(authoritative, TopicOperation.GET_METADATA); String schemaId = getSchemaId(); ByteBuffer bbVersion = ByteBuffer.allocate(Long.BYTES); bbVersion.putLong(Long.parseLong(version)); @@ -106,7 +105,7 @@ public void getSchema(boolean authoritative, String version, AsyncResponse respo } public void getAllSchemas(boolean authoritative, AsyncResponse response) { - 
validateDestinationAndAdminOperation(authoritative); + validateOwnershipAndOperation(authoritative, TopicOperation.GET_METADATA); String schemaId = getSchemaId(); pulsar().getSchemaRegistryService().trimDeletedSchemaAndGetList(schemaId).handle((schema, error) -> { @@ -136,16 +135,7 @@ public void deleteSchema(boolean authoritative, AsyncResponse response) { public void postSchema(PostSchemaPayload payload, boolean authoritative, AsyncResponse response) { validateDestinationAndAdminOperation(authoritative); - getNamespacePoliciesAsync(namespaceName).thenAccept(policies -> { - SchemaCompatibilityStrategy schemaCompatibilityStrategy = policies.schema_compatibility_strategy; - if (schemaCompatibilityStrategy == SchemaCompatibilityStrategy.UNDEFINED) { - schemaCompatibilityStrategy = - pulsar().getConfig().getSchemaCompatibilityStrategy(); - if (schemaCompatibilityStrategy == SchemaCompatibilityStrategy.UNDEFINED) { - schemaCompatibilityStrategy = SchemaCompatibilityStrategy - .fromAutoUpdatePolicy(policies.schema_auto_update_compatibility_strategy); - } - } + getSchemaCompatibilityStrategyAsync().thenAccept(schemaCompatibilityStrategy -> { byte[] data; if (SchemaType.KEY_VALUE.name().equals(payload.getType())) { try { @@ -199,26 +189,17 @@ public void testCompatibility(PostSchemaPayload payload, boolean authoritative, validateDestinationAndAdminOperation(authoritative); String schemaId = getSchemaId(); - Policies policies = getNamespacePolicies(namespaceName); - SchemaCompatibilityStrategy schemaCompatibilityStrategy; - if (policies.schema_compatibility_strategy == SchemaCompatibilityStrategy.UNDEFINED) { - schemaCompatibilityStrategy = SchemaCompatibilityStrategy - .fromAutoUpdatePolicy(policies.schema_auto_update_compatibility_strategy); - } else { - schemaCompatibilityStrategy = policies.schema_compatibility_strategy; - } - - pulsar().getSchemaRegistryService() - .isCompatible(schemaId, - 
SchemaData.builder().data(payload.getSchema().getBytes(Charsets.UTF_8)).isDeleted(false) - .timestamp(clock.millis()).type(SchemaType.valueOf(payload.getType())) - .user(defaultIfEmpty(clientAppId(), "")).props(payload.getProperties()).build(), - schemaCompatibilityStrategy) - .thenAccept(isCompatible -> response.resume(Response.accepted() - .entity(IsCompatibilityResponse.builder().isCompatibility(isCompatible) - .schemaCompatibilityStrategy(schemaCompatibilityStrategy.name()).build()) - .build())) + getSchemaCompatibilityStrategyAsync().thenCompose(schemaCompatibilityStrategy -> pulsar() + .getSchemaRegistryService().isCompatible(schemaId, + SchemaData.builder().data(payload.getSchema().getBytes(Charsets.UTF_8)).isDeleted(false) + .timestamp(clock.millis()).type(SchemaType.valueOf(payload.getType())) + .user(defaultIfEmpty(clientAppId(), "")).props(payload.getProperties()).build(), + schemaCompatibilityStrategy) + .thenAccept(isCompatible -> response.resume(Response.accepted() + .entity(IsCompatibilityResponse.builder().isCompatibility(isCompatible) + .schemaCompatibilityStrategy(schemaCompatibilityStrategy.name()).build()) + .build()))) .exceptionally(error -> { response.resume(new RestException(error)); return null; @@ -228,7 +209,7 @@ public void testCompatibility(PostSchemaPayload payload, boolean authoritative, public void getVersionBySchema( PostSchemaPayload payload, boolean authoritative, AsyncResponse response) { - validateDestinationAndAdminOperation(authoritative); + validateOwnershipAndOperation(authoritative, TopicOperation.GET_METADATA); String schemaId = getSchemaId(); @@ -272,9 +253,11 @@ private static GetSchemaResponse convertSchemaAndMetadataToGetSchemaResponse(Sch private static void handleGetSchemaResponse(AsyncResponse response, SchemaAndMetadata schema, Throwable error) { if (isNull(error)) { if (isNull(schema)) { - response.resume(Response.status(Response.Status.NOT_FOUND).build()); + response.resume(Response.status( + 
Response.Status.NOT_FOUND.getStatusCode(), "Schema not found").build()); } else if (schema.schema.isDeleted()) { - response.resume(Response.status(Response.Status.NOT_FOUND).build()); + response.resume(Response.status( + Response.Status.NOT_FOUND.getStatusCode(), "Schema is deleted").build()); } else { response.resume(Response.ok().encoding(MediaType.APPLICATION_JSON) .entity(convertSchemaAndMetadataToGetSchemaResponse(schema)).build()); @@ -290,7 +273,8 @@ private static void handleGetAllSchemasResponse(AsyncResponse response, List { - if (e != null) { - log.error("[{}] Failed to create tenant ", clientAppId, e.getCause()); - asyncResponse.resume(new RestException(e)); + tenantResources().tenantExistsAsync(tenant).thenAccept(exist -> { + if (exist) { + asyncResponse.resume(new RestException(Status.CONFLICT, "Tenant already exist")); return; } - - int maxTenants = pulsar().getConfiguration().getMaxTenants(); - // Due to the cost of distributed locks, no locks are added here. - // In a concurrent scenario, the threshold will be exceeded. - if (maxTenants > 0) { - if (tenants != null && tenants.size() >= maxTenants) { - asyncResponse.resume( - new RestException(Status.PRECONDITION_FAILED, "Exceed the maximum number of tenants")); + tenantResources().listTenantsAsync().whenComplete((tenants, e) -> { + if (e != null) { + log.error("[{}] Failed to create tenant ", clientAppId, e.getCause()); + asyncResponse.resume(new RestException(e)); return; } - } - tenantResources().tenantExistsAsync(tenant).thenAccept(exist ->{ - if (exist) { - asyncResponse.resume(new RestException(Status.CONFLICT, "Tenant already exist")); - return; + int maxTenants = pulsar().getConfiguration().getMaxTenants(); + // Due to the cost of distributed locks, no locks are added here. + // In a concurrent scenario, the threshold will be exceeded. 
+ if (maxTenants > 0) { + if (tenants != null && tenants.size() >= maxTenants) { + asyncResponse.resume( + new RestException(Status.PRECONDITION_FAILED, "Exceed the maximum number of tenants")); + return; + } } tenantResources().createTenantAsync(tenant, tenantInfo).thenAccept((r) -> { log.info("[{}] Created tenant {}", clientAppId(), tenant); asyncResponse.resume(Response.noContent().build()); }).exceptionally(ex -> { - log.error("[{}] Failed to create tenant {}", clientAppId, tenant, e); + log.error("[{}] Failed to create tenant {}", clientAppId, tenant, ex); asyncResponse.resume(new RestException(ex)); return null; }); }).exceptionally(ex -> { - log.error("[{}] Failed to create tenant {}", clientAppId(), tenant, e); + log.error("[{}] Failed to create tenant {}", clientAppId(), tenant, ex); asyncResponse.resume(new RestException(ex)); return null; }); @@ -177,12 +176,12 @@ public void createTenant(@Suspended final AsyncResponse asyncResponse, @POST @Path("/{tenant}") @ApiOperation(value = "Update the admins for a tenant.", - notes = "This operation requires Pulsar super-user privileges.") - @ApiResponses(value = { @ApiResponse(code = 403, message = "The requester doesn't have admin permissions"), + notes = "This operation requires Pulsar super-user privileges.") + @ApiResponses(value = {@ApiResponse(code = 403, message = "The requester doesn't have admin permissions"), @ApiResponse(code = 404, message = "Tenant does not exist"), @ApiResponse(code = 409, message = "Tenant already exists"), @ApiResponse(code = 412, message = "Clusters can not be empty"), - @ApiResponse(code = 412, message = "Clusters do not exist") }) + @ApiResponse(code = 412, message = "Clusters do not exist")}) public void updateTenant(@Suspended final AsyncResponse asyncResponse, @ApiParam(value = "The tenant name") @PathParam("tenant") String tenant, @ApiParam(value = "TenantInfo") TenantInfoImpl newTenantAdmin) { @@ -227,10 +226,10 @@ public void updateTenant(@Suspended final AsyncResponse 
asyncResponse, @DELETE @Path("/{tenant}") @ApiOperation(value = "Delete a tenant and all namespaces and topics under it.") - @ApiResponses(value = { @ApiResponse(code = 403, message = "The requester doesn't have admin permissions"), + @ApiResponses(value = {@ApiResponse(code = 403, message = "The requester doesn't have admin permissions"), @ApiResponse(code = 404, message = "Tenant does not exist"), @ApiResponse(code = 405, message = "Broker doesn't allow forced deletion of tenants"), - @ApiResponse(code = 409, message = "The tenant still has active namespaces") }) + @ApiResponse(code = 409, message = "The tenant still has active namespaces")}) public void deleteTenant(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") @ApiParam(value = "The tenant name") String tenant, @QueryParam("force") @DefaultValue("false") boolean force) { @@ -258,27 +257,28 @@ protected void internalDeleteTenant(AsyncResponse asyncResponse, String tenant) asyncResponse.resume(new RestException(Status.NOT_FOUND, "Tenant doesn't exist")); return null; } - return hasActiveNamespace(tenant).thenAccept(ns -> { - try { - tenantResources().deleteTenantAsync(tenant) - .thenAccept(t -> { - log.info("[{}] Deleted tenant {}", clientAppId(), tenant); - asyncResponse.resume(Response.noContent().build()); - }).exceptionally(ex -> { - log.error("Failed to delete tenant {}", tenant, ex.getCause()); - asyncResponse.resume(new RestException(ex)); - return null; + + return hasActiveNamespace(tenant) + .thenCompose(ignore -> tenantResources().deleteTenantAsync(tenant)) + .thenCompose(ignore -> pulsar().getPulsarResources().getTopicResources() + .clearTenantPersistence(tenant)) + .thenCompose(ignore -> pulsar().getPulsarResources().getNamespaceResources() + .deleteTenantAsync(tenant)) + .thenCompose(ignore -> pulsar().getPulsarResources().getNamespaceResources() + .deleteBundleDataTenantAsync(tenant)) + .whenComplete((ignore, ex) -> { + if (ex != null) { + log.error("[{}] Failed to delete tenant 
{}", clientAppId(), tenant, ex); + if (ex.getCause() instanceof IllegalStateException) { + asyncResponse.resume(new RestException(Status.CONFLICT, ex.getCause())); + } else { + asyncResponse.resume(new RestException(ex)); + } + } else { + log.info("[{}] Deleted tenant {}", clientAppId(), tenant); + asyncResponse.resume(Response.noContent().build()); + } }); - log.info("[{}] Deleted tenant {}", clientAppId(), tenant); - } catch (Exception e) { - log.error("[{}] Failed to delete tenant {}", clientAppId(), tenant, e); - asyncResponse.resume(new RestException(e)); - } - }).exceptionally(ex -> { - log.error("Failed to delete tenant due to active namespace {}", tenant, ex.getCause()); - asyncResponse.resume(new RestException(ex)); - return null; - }); }); } @@ -319,14 +319,6 @@ protected void internalDeleteTenantForcefully(AsyncResponse asyncResponse, Strin return null; } - - try { - pulsar().getPulsarResources().getTopicResources().clearTenantPersistence(tenant).get(); - } catch (ExecutionException | InterruptedException e) { - // warn level log here since this failure has no side effect besides left a un-used metadata - // and also will not affect the re-creation of tenant - log.warn("[{}] Failed to remove managed-ledger for {}", clientAppId(), tenant, e); - } // delete tenant normally internalDeleteTenant(asyncResponse, tenant); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java index 504ce92de45c2..8eff6815404cd 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/TransactionsBase.java @@ -18,11 +18,9 @@ */ package org.apache.pulsar.broker.admin.impl; -import static javax.ws.rs.core.Response.Status.BAD_REQUEST; import static javax.ws.rs.core.Response.Status.METHOD_NOT_ALLOWED; import static 
javax.ws.rs.core.Response.Status.NOT_FOUND; import static javax.ws.rs.core.Response.Status.SERVICE_UNAVAILABLE; -import static javax.ws.rs.core.Response.Status.TEMPORARY_REDIRECT; import com.google.common.collect.Lists; import java.util.ArrayList; import java.util.HashMap; @@ -38,9 +36,6 @@ import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.admin.AdminResource; -import org.apache.pulsar.broker.service.BrokerServiceException.NotAllowedException; -import org.apache.pulsar.broker.service.BrokerServiceException.ServiceUnitNotReadyException; -import org.apache.pulsar.broker.service.BrokerServiceException.SubscriptionNotFoundException; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.web.RestException; @@ -49,6 +44,7 @@ import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.TransactionBufferStats; import org.apache.pulsar.common.policies.data.TransactionCoordinatorInternalStats; import org.apache.pulsar.common.policies.data.TransactionCoordinatorStats; import org.apache.pulsar.common.policies.data.TransactionInBufferStats; @@ -56,6 +52,7 @@ import org.apache.pulsar.common.policies.data.TransactionLogStats; import org.apache.pulsar.common.policies.data.TransactionMetadata; import org.apache.pulsar.common.policies.data.TransactionPendingAckInternalStats; +import org.apache.pulsar.common.policies.data.TransactionPendingAckStats; import org.apache.pulsar.common.util.Codec; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; @@ -70,216 +67,97 @@ public abstract class TransactionsBase extends AdminResource { protected void internalGetCoordinatorStats(AsyncResponse asyncResponse, 
boolean authoritative, Integer coordinatorId) { - if (pulsar().getConfig().isTransactionCoordinatorEnabled()) { - if (coordinatorId != null) { - validateTopicOwnership(TopicName.TRANSACTION_COORDINATOR_ASSIGN.getPartition(coordinatorId), - authoritative); - TransactionMetadataStore transactionMetadataStore = - pulsar().getTransactionMetadataStoreService().getStores() - .get(TransactionCoordinatorID.get(coordinatorId)); - if (transactionMetadataStore == null) { - asyncResponse.resume(new RestException(NOT_FOUND, - "Transaction coordinator not found! coordinator id : " + coordinatorId)); + if (coordinatorId != null) { + validateTopicOwnership(TopicName.TRANSACTION_COORDINATOR_ASSIGN.getPartition(coordinatorId), + authoritative); + TransactionMetadataStore transactionMetadataStore = + pulsar().getTransactionMetadataStoreService().getStores() + .get(TransactionCoordinatorID.get(coordinatorId)); + if (transactionMetadataStore == null) { + asyncResponse.resume(new RestException(NOT_FOUND, + "Transaction coordinator not found! 
coordinator id : " + coordinatorId)); + return; + } + asyncResponse.resume(transactionMetadataStore.getCoordinatorStats()); + } else { + getPartitionedTopicMetadataAsync(TopicName.TRANSACTION_COORDINATOR_ASSIGN, + false, false).thenAccept(partitionMetadata -> { + if (partitionMetadata.partitions == 0) { + asyncResponse.resume(new RestException(Response.Status.NOT_FOUND, + "Transaction coordinator not found")); return; } - asyncResponse.resume(transactionMetadataStore.getCoordinatorStats()); - } else { - getPartitionedTopicMetadataAsync(TopicName.TRANSACTION_COORDINATOR_ASSIGN, - false, false).thenAccept(partitionMetadata -> { - if (partitionMetadata.partitions == 0) { - asyncResponse.resume(new RestException(Response.Status.NOT_FOUND, - "Transaction coordinator not found")); + List> transactionMetadataStoreInfoFutures = + Lists.newArrayList(); + for (int i = 0; i < partitionMetadata.partitions; i++) { + try { + transactionMetadataStoreInfoFutures + .add(pulsar().getAdminClient().transactions().getCoordinatorStatsByIdAsync(i)); + } catch (PulsarServerException e) { + asyncResponse.resume(new RestException(e)); return; } - List> transactionMetadataStoreInfoFutures = - Lists.newArrayList(); - for (int i = 0; i < partitionMetadata.partitions; i++) { + } + Map stats = new HashMap<>(); + FutureUtil.waitForAll(transactionMetadataStoreInfoFutures).whenComplete((result, e) -> { + if (e != null) { + asyncResponse.resume(new RestException(e)); + return; + } + + for (int i = 0; i < transactionMetadataStoreInfoFutures.size(); i++) { try { - transactionMetadataStoreInfoFutures - .add(pulsar().getAdminClient().transactions().getCoordinatorStatsByIdAsync(i)); - } catch (PulsarServerException e) { - asyncResponse.resume(new RestException(e)); + stats.put(i, transactionMetadataStoreInfoFutures.get(i).get()); + } catch (Exception exception) { + asyncResponse.resume(new RestException(exception.getCause())); return; } } - Map stats = new HashMap<>(); - 
FutureUtil.waitForAll(transactionMetadataStoreInfoFutures).whenComplete((result, e) -> { - if (e != null) { - asyncResponse.resume(new RestException(e)); - return; - } - for (int i = 0; i < transactionMetadataStoreInfoFutures.size(); i++) { - try { - stats.put(i, transactionMetadataStoreInfoFutures.get(i).get()); - } catch (Exception exception) { - asyncResponse.resume(new RestException(exception.getCause())); - return; - } - } - - asyncResponse.resume(stats); - }); - }).exceptionally(ex -> { - log.error("[{}] Failed to get transaction coordinator state.", clientAppId(), ex); - resumeAsyncResponseExceptionally(asyncResponse, ex); - return null; + asyncResponse.resume(stats); }); - } - } else { - asyncResponse.resume(new RestException(SERVICE_UNAVAILABLE, - "This Broker is not configured with transactionCoordinatorEnabled=true.")); + }).exceptionally(ex -> { + log.error("[{}] Failed to get transaction coordinator state.", clientAppId(), ex); + resumeAsyncResponseExceptionally(asyncResponse, ex); + return null; + }); } } - protected void internalGetTransactionInPendingAckStats(AsyncResponse asyncResponse, boolean authoritative, - long mostSigBits, long leastSigBits, String subName) { - if (pulsar().getConfig().isTransactionCoordinatorEnabled()) { - validateTopicOwnership(topicName, authoritative); - CompletableFuture> topicFuture = pulsar().getBrokerService() - .getTopics().get(topicName.toString()); - if (topicFuture != null) { - topicFuture.whenComplete((optionalTopic, e) -> { - if (e != null) { - asyncResponse.resume(new RestException(e)); - return; - } - if (!optionalTopic.isPresent()) { - asyncResponse.resume(new RestException(TEMPORARY_REDIRECT, - "Topic is not owned by this broker!")); - return; - } - Topic topicObject = optionalTopic.get(); - if (topicObject instanceof PersistentTopic) { - asyncResponse.resume(((PersistentTopic) topicObject) - .getTransactionInPendingAckStats(new TxnID(mostSigBits, leastSigBits), subName)); - } else { - 
asyncResponse.resume(new RestException(BAD_REQUEST, "Topic is not a persistent topic!")); - } - }); - } else { - asyncResponse.resume(new RestException(TEMPORARY_REDIRECT, "Topic is not owned by this broker!")); - } - } else { - asyncResponse.resume(new RestException(SERVICE_UNAVAILABLE, - "This Broker is not configured with transactionCoordinatorEnabled=true.")); - } + protected CompletableFuture internalGetTransactionInPendingAckStats( + boolean authoritative, long mostSigBits, long leastSigBits, String subName) { + return getExistingPersistentTopicAsync(authoritative) + .thenApply(topic -> topic.getTransactionInPendingAckStats(new TxnID(mostSigBits, leastSigBits), + subName)); } - protected void internalGetTransactionInBufferStats(AsyncResponse asyncResponse, boolean authoritative, - long mostSigBits, long leastSigBits) { - if (pulsar().getConfig().isTransactionCoordinatorEnabled()) { - validateTopicOwnership(topicName, authoritative); - CompletableFuture> topicFuture = pulsar().getBrokerService() - .getTopics().get(topicName.toString()); - if (topicFuture != null) { - topicFuture.whenComplete((optionalTopic, e) -> { - if (e != null) { - asyncResponse.resume(new RestException(e)); - return; - } - if (!optionalTopic.isPresent()) { - asyncResponse.resume(new RestException(TEMPORARY_REDIRECT, - "Topic is not owned by this broker!")); - return; - } - Topic topicObject = optionalTopic.get(); - if (topicObject instanceof PersistentTopic) { - TransactionInBufferStats transactionInBufferStats = ((PersistentTopic) topicObject) - .getTransactionInBufferStats(new TxnID(mostSigBits, leastSigBits)); - asyncResponse.resume(transactionInBufferStats); - } else { - asyncResponse.resume(new RestException(BAD_REQUEST, "Topic is not a persistent topic!")); - } - }); - } else { - asyncResponse.resume(new RestException(TEMPORARY_REDIRECT, "Topic is not owned by this broker!")); - } - } else { - asyncResponse.resume(new RestException(SERVICE_UNAVAILABLE, - "This Broker is not 
configured with transactionCoordinatorEnabled=true.")); - } + protected CompletableFuture internalGetTransactionInBufferStats( + boolean authoritative, long mostSigBits, long leastSigBits) { + return getExistingPersistentTopicAsync(authoritative) + .thenApply(topic -> topic.getTransactionInBufferStats(new TxnID(mostSigBits, leastSigBits))); } - protected void internalGetTransactionBufferStats(AsyncResponse asyncResponse, boolean authoritative) { - if (pulsar().getConfig().isTransactionCoordinatorEnabled()) { - validateTopicOwnership(topicName, authoritative); - CompletableFuture> topicFuture = pulsar().getBrokerService() - .getTopics().get(topicName.toString()); - if (topicFuture != null) { - topicFuture.whenComplete((optionalTopic, e) -> { - if (e != null) { - asyncResponse.resume(new RestException(e)); - return; - } - - if (!optionalTopic.isPresent()) { - asyncResponse.resume(new RestException(TEMPORARY_REDIRECT, - "Topic is not owned by this broker!")); - return; - } - Topic topicObject = optionalTopic.get(); - if (topicObject instanceof PersistentTopic) { - asyncResponse.resume(((PersistentTopic) topicObject).getTransactionBufferStats()); - } else { - asyncResponse.resume(new RestException(BAD_REQUEST, "Topic is not a persistent topic!")); - } - }); - } else { - asyncResponse.resume(new RestException(TEMPORARY_REDIRECT, "Topic is not owned by this broker!")); - } - } else { - asyncResponse.resume(new RestException(SERVICE_UNAVAILABLE, "Broker don't support transaction!")); - } + protected CompletableFuture internalGetTransactionBufferStats(boolean authoritative) { + return getExistingPersistentTopicAsync(authoritative) + .thenApply(topic -> topic.getTransactionBufferStats()); } - protected void internalGetPendingAckStats(AsyncResponse asyncResponse, boolean authoritative, String subName) { - if (pulsar().getConfig().isTransactionCoordinatorEnabled()) { - validateTopicOwnership(topicName, authoritative); - CompletableFuture> topicFuture = 
pulsar().getBrokerService() - .getTopics().get(topicName.toString()); - if (topicFuture != null) { - topicFuture.whenComplete((optionalTopic, e) -> { - if (e != null) { - asyncResponse.resume(new RestException(e)); - return; - } - - if (!optionalTopic.isPresent()) { - asyncResponse.resume(new RestException(TEMPORARY_REDIRECT, - "Topic is not owned by this broker!")); - return; - } - Topic topicObject = optionalTopic.get(); - if (topicObject instanceof PersistentTopic) { - asyncResponse.resume(((PersistentTopic) topicObject).getTransactionPendingAckStats(subName)); - } else { - asyncResponse.resume(new RestException(BAD_REQUEST, "Topic is not a persistent topic!")); - } - }); - } else { - asyncResponse.resume(new RestException(TEMPORARY_REDIRECT, "Topic is not owned by this broker!")); - } - } else { - asyncResponse.resume(new RestException(SERVICE_UNAVAILABLE, "Broker don't support transaction!")); - } + protected CompletableFuture internalGetPendingAckStats( + boolean authoritative, String subName) { + return getExistingPersistentTopicAsync(authoritative) + .thenApply(topic -> topic.getTransactionPendingAckStats(subName)); } protected void internalGetTransactionMetadata(AsyncResponse asyncResponse, boolean authoritative, int mostSigBits, long leastSigBits) { try { - if (pulsar().getConfig().isTransactionCoordinatorEnabled()) { - validateTopicOwnership(TopicName.TRANSACTION_COORDINATOR_ASSIGN.getPartition(mostSigBits), - authoritative); - CompletableFuture transactionMetadataFuture = new CompletableFuture<>(); - TxnMeta txnMeta = pulsar().getTransactionMetadataStoreService() - .getTxnMeta(new TxnID(mostSigBits, leastSigBits)).get(); - getTransactionMetadata(txnMeta, transactionMetadataFuture); - asyncResponse.resume(transactionMetadataFuture.get(10, TimeUnit.SECONDS)); - } else { - asyncResponse.resume(new RestException(SERVICE_UNAVAILABLE, - "This Broker is not configured with transactionCoordinatorEnabled=true.")); - } + 
validateTopicOwnership(TopicName.TRANSACTION_COORDINATOR_ASSIGN.getPartition(mostSigBits), + authoritative); + CompletableFuture transactionMetadataFuture = new CompletableFuture<>(); + TxnMeta txnMeta = pulsar().getTransactionMetadataStoreService() + .getTxnMeta(new TxnID(mostSigBits, leastSigBits)).get(); + getTransactionMetadata(txnMeta, transactionMetadataFuture); + asyncResponse.resume(transactionMetadataFuture.get(10, TimeUnit.SECONDS)); } catch (Exception e) { if (e instanceof ExecutionException) { if (e.getCause() instanceof CoordinatorNotFoundException @@ -381,91 +259,87 @@ private void getTransactionMetadata(TxnMeta txnMeta, protected void internalGetSlowTransactions(AsyncResponse asyncResponse, boolean authoritative, long timeout, Integer coordinatorId) { try { - if (pulsar().getConfig().isTransactionCoordinatorEnabled()) { - if (coordinatorId != null) { - validateTopicOwnership(TopicName.TRANSACTION_COORDINATOR_ASSIGN.getPartition(coordinatorId), - authoritative); - TransactionMetadataStore transactionMetadataStore = - pulsar().getTransactionMetadataStoreService().getStores() - .get(TransactionCoordinatorID.get(coordinatorId)); - if (transactionMetadataStore == null) { - asyncResponse.resume(new RestException(NOT_FOUND, - "Transaction coordinator not found! coordinator id : " + coordinatorId)); + if (coordinatorId != null) { + validateTopicOwnership(TopicName.TRANSACTION_COORDINATOR_ASSIGN.getPartition(coordinatorId), + authoritative); + TransactionMetadataStore transactionMetadataStore = + pulsar().getTransactionMetadataStoreService().getStores() + .get(TransactionCoordinatorID.get(coordinatorId)); + if (transactionMetadataStore == null) { + asyncResponse.resume(new RestException(NOT_FOUND, + "Transaction coordinator not found! 
coordinator id : " + coordinatorId)); + return; + } + List transactions = transactionMetadataStore.getSlowTransactions(timeout); + List> completableFutures = new ArrayList<>(); + for (TxnMeta txnMeta : transactions) { + CompletableFuture completableFuture = new CompletableFuture<>(); + getTransactionMetadata(txnMeta, completableFuture); + completableFutures.add(completableFuture); + } + + FutureUtil.waitForAll(completableFutures).whenComplete((v, e) -> { + if (e != null) { + asyncResponse.resume(new RestException(e.getCause())); return; } - List transactions = transactionMetadataStore.getSlowTransactions(timeout); - List> completableFutures = new ArrayList<>(); - for (TxnMeta txnMeta : transactions) { - CompletableFuture completableFuture = new CompletableFuture<>(); - getTransactionMetadata(txnMeta, completableFuture); - completableFutures.add(completableFuture); - } - FutureUtil.waitForAll(completableFutures).whenComplete((v, e) -> { + Map transactionMetadata = new HashMap<>(); + for (CompletableFuture future : completableFutures) { + try { + transactionMetadata.put(future.get().txnId, future.get()); + } catch (Exception exception) { + asyncResponse.resume(new RestException(exception.getCause())); + return; + } + } + asyncResponse.resume(transactionMetadata); + }); + } else { + getPartitionedTopicMetadataAsync(TopicName.TRANSACTION_COORDINATOR_ASSIGN, + false, false).thenAccept(partitionMetadata -> { + if (partitionMetadata.partitions == 0) { + asyncResponse.resume(new RestException(Response.Status.NOT_FOUND, + "Transaction coordinator not found")); + return; + } + List>> completableFutures = + Lists.newArrayList(); + for (int i = 0; i < partitionMetadata.partitions; i++) { + try { + completableFutures + .add(pulsar().getAdminClient().transactions() + .getSlowTransactionsByCoordinatorIdAsync(i, timeout, + TimeUnit.MILLISECONDS)); + } catch (PulsarServerException e) { + asyncResponse.resume(new RestException(e)); + return; + } + } + Map transactionMetadataMaps = 
new HashMap<>(); + FutureUtil.waitForAll(completableFutures).whenComplete((result, e) -> { if (e != null) { - asyncResponse.resume(new RestException(e.getCause())); + asyncResponse.resume(new RestException(e)); return; } - Map transactionMetadata = new HashMap<>(); - for (CompletableFuture future : completableFutures) { + for (CompletableFuture> transactionMetadataMap + : completableFutures) { try { - transactionMetadata.put(future.get().txnId, future.get()); + transactionMetadataMaps.putAll(transactionMetadataMap.get()); } catch (Exception exception) { asyncResponse.resume(new RestException(exception.getCause())); return; } } - asyncResponse.resume(transactionMetadata); - }); - } else { - getPartitionedTopicMetadataAsync(TopicName.TRANSACTION_COORDINATOR_ASSIGN, - false, false).thenAccept(partitionMetadata -> { - if (partitionMetadata.partitions == 0) { - asyncResponse.resume(new RestException(Response.Status.NOT_FOUND, - "Transaction coordinator not found")); - return; - } - List>> completableFutures = - Lists.newArrayList(); - for (int i = 0; i < partitionMetadata.partitions; i++) { - try { - completableFutures - .add(pulsar().getAdminClient().transactions() - .getSlowTransactionsByCoordinatorIdAsync(i, timeout, - TimeUnit.MILLISECONDS)); - } catch (PulsarServerException e) { - asyncResponse.resume(new RestException(e)); - return; - } - } - Map transactionMetadataMaps = new HashMap<>(); - FutureUtil.waitForAll(completableFutures).whenComplete((result, e) -> { - if (e != null) { - asyncResponse.resume(new RestException(e)); - return; - } - - for (CompletableFuture> transactionMetadataMap - : completableFutures) { - try { - transactionMetadataMaps.putAll(transactionMetadataMap.get()); - } catch (Exception exception) { - asyncResponse.resume(new RestException(exception.getCause())); - return; - } - } - asyncResponse.resume(transactionMetadataMaps); - }); - }).exceptionally(ex -> { - log.error("[{}] Failed to get transaction coordinator state.", clientAppId(), ex); 
- resumeAsyncResponseExceptionally(asyncResponse, ex); - return null; + asyncResponse.resume(transactionMetadataMaps); }); + }).exceptionally(ex -> { + log.error("[{}] Failed to get transaction coordinator state.", clientAppId(), ex); + resumeAsyncResponseExceptionally(asyncResponse, ex); + return null; + }); - } - } else { - asyncResponse.resume(new RestException(SERVICE_UNAVAILABLE, "Broker don't support transaction!")); } } catch (Exception e) { asyncResponse.resume(new RestException(e)); @@ -475,101 +349,74 @@ protected void internalGetSlowTransactions(AsyncResponse asyncResponse, protected void internalGetCoordinatorInternalStats(AsyncResponse asyncResponse, boolean authoritative, boolean metadata, int coordinatorId) { try { - if (pulsar().getConfig().isTransactionCoordinatorEnabled()) { - TopicName topicName = TopicName.TRANSACTION_COORDINATOR_ASSIGN.getPartition(coordinatorId); - validateTopicOwnership(topicName, authoritative); - TransactionMetadataStore metadataStore = pulsar().getTransactionMetadataStoreService() - .getStores().get(TransactionCoordinatorID.get(coordinatorId)); - if (metadataStore == null) { - asyncResponse.resume(new RestException(NOT_FOUND, - "Transaction coordinator not found! 
coordinator id : " + coordinatorId)); - return; - } - if (metadataStore instanceof MLTransactionMetadataStore) { - ManagedLedger managedLedger = ((MLTransactionMetadataStore) metadataStore).getManagedLedger(); - TransactionCoordinatorInternalStats transactionCoordinatorInternalStats = - new TransactionCoordinatorInternalStats(); - TransactionLogStats transactionLogStats = new TransactionLogStats(); - transactionLogStats.managedLedgerName = managedLedger.getName(); - transactionLogStats.managedLedgerInternalStats = - managedLedger.getManagedLedgerInternalStats(metadata).get(); - transactionCoordinatorInternalStats.transactionLogStats = transactionLogStats; - asyncResponse.resume(transactionCoordinatorInternalStats); - } else { - asyncResponse.resume(new RestException(METHOD_NOT_ALLOWED, - "Broker don't use MLTransactionMetadataStore!")); - } + TopicName topicName = TopicName.TRANSACTION_COORDINATOR_ASSIGN.getPartition(coordinatorId); + validateTopicOwnership(topicName, authoritative); + TransactionMetadataStore metadataStore = pulsar().getTransactionMetadataStoreService() + .getStores().get(TransactionCoordinatorID.get(coordinatorId)); + if (metadataStore == null) { + asyncResponse.resume(new RestException(NOT_FOUND, + "Transaction coordinator not found! 
coordinator id : " + coordinatorId)); + return; + } + if (metadataStore instanceof MLTransactionMetadataStore) { + ManagedLedger managedLedger = ((MLTransactionMetadataStore) metadataStore).getManagedLedger(); + TransactionCoordinatorInternalStats transactionCoordinatorInternalStats = + new TransactionCoordinatorInternalStats(); + TransactionLogStats transactionLogStats = new TransactionLogStats(); + transactionLogStats.managedLedgerName = managedLedger.getName(); + transactionLogStats.managedLedgerInternalStats = + managedLedger.getManagedLedgerInternalStats(metadata).get(); + transactionCoordinatorInternalStats.transactionLogStats = transactionLogStats; + asyncResponse.resume(transactionCoordinatorInternalStats); } else { - asyncResponse.resume(new RestException(SERVICE_UNAVAILABLE, - "This Broker is not configured with transactionCoordinatorEnabled=true.")); + asyncResponse.resume(new RestException(METHOD_NOT_ALLOWED, + "Broker don't use MLTransactionMetadataStore!")); } } catch (Exception e) { - asyncResponse.resume(new RestException(e.getCause())); + resumeAsyncResponseExceptionally(asyncResponse, e); } } - protected void internalGetPendingAckInternalStats(AsyncResponse asyncResponse, boolean authoritative, - TopicName topicName, String subName, boolean metadata) { - try { - if (pulsar().getConfig().isTransactionCoordinatorEnabled()) { - validateTopicOwnership(topicName, authoritative); - CompletableFuture> topicFuture = pulsar().getBrokerService() - .getTopics().get(topicName.toString()); - if (topicFuture != null) { - topicFuture.whenComplete((optionalTopic, e) -> { - - if (e != null) { - asyncResponse.resume(new RestException(e)); - return; - } - if (!optionalTopic.isPresent()) { - asyncResponse.resume(new RestException(TEMPORARY_REDIRECT, - "Topic is not owned by this broker!")); - return; - } - Topic topicObject = optionalTopic.get(); - if (topicObject instanceof PersistentTopic) { - try { - ManagedLedger managedLedger = - ((PersistentTopic) 
topicObject).getPendingAckManagedLedger(subName).get(); - TransactionPendingAckInternalStats stats = - new TransactionPendingAckInternalStats(); + protected CompletableFuture internalGetPendingAckInternalStats( + boolean authoritative, String subName, boolean metadata) { + return getExistingPersistentTopicAsync(authoritative) + .thenCompose(topic -> topic.getPendingAckManagedLedger(subName)) + .thenCompose(managedLedger -> + managedLedger.getManagedLedgerInternalStats(metadata) + .thenApply(internalStats -> { TransactionLogStats pendingAckLogStats = new TransactionLogStats(); pendingAckLogStats.managedLedgerName = managedLedger.getName(); - pendingAckLogStats.managedLedgerInternalStats = - managedLedger.getManagedLedgerInternalStats(metadata).get(); + pendingAckLogStats.managedLedgerInternalStats = internalStats; + return pendingAckLogStats; + }) + .thenApply(pendingAckLogStats -> { + TransactionPendingAckInternalStats stats = new TransactionPendingAckInternalStats(); stats.pendingAckLogStats = pendingAckLogStats; - asyncResponse.resume(stats); - } catch (Exception exception) { - if (exception instanceof ExecutionException) { - if (exception.getCause() instanceof ServiceUnitNotReadyException) { - asyncResponse.resume(new RestException(SERVICE_UNAVAILABLE, - exception.getCause())); - return; - } else if (exception.getCause() instanceof NotAllowedException) { - asyncResponse.resume(new RestException(METHOD_NOT_ALLOWED, - exception.getCause())); - return; - } else if (exception.getCause() instanceof SubscriptionNotFoundException) { - asyncResponse.resume(new RestException(NOT_FOUND, exception.getCause())); - return; - } - } - asyncResponse.resume(new RestException(exception)); - } - } else { - asyncResponse.resume(new RestException(BAD_REQUEST, "Topic is not a persistent topic!")); - } - }); - } else { - asyncResponse.resume(new RestException(TEMPORARY_REDIRECT, "Topic is not owned by this broker!")); - } - } else { - asyncResponse.resume(new 
RestException(SERVICE_UNAVAILABLE, - "This Broker is not configured with transactionCoordinatorEnabled=true.")); + return stats; + }) + ); + } + + protected CompletableFuture getExistingPersistentTopicAsync(boolean authoritative) { + return validateTopicOwnershipAsync(topicName, authoritative).thenCompose(__ -> { + CompletableFuture> topicFuture = pulsar().getBrokerService() + .getTopics().get(topicName.toString()); + if (topicFuture == null) { + return FutureUtil.failedFuture(new RestException(NOT_FOUND, "Topic not found")); } - } catch (Exception e) { - asyncResponse.resume(new RestException(e.getCause())); + return topicFuture.thenCompose(optionalTopic -> { + if (!optionalTopic.isPresent()) { + return FutureUtil.failedFuture(new RestException(NOT_FOUND, "Topic not found")); + } + return CompletableFuture.completedFuture((PersistentTopic) optionalTopic.get()); + }); + }); + } + + protected void checkTransactionCoordinatorEnabled() { + if (!pulsar().getConfig().isTransactionCoordinatorEnabled()) { + throw new RestException(SERVICE_UNAVAILABLE, + "This Broker is not configured with transactionCoordinatorEnabled=true."); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/Namespaces.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/Namespaces.java index dac7e44b9ef1c..ab8ca2d65f140 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/Namespaces.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/Namespaces.java @@ -129,12 +129,10 @@ public void getTopics(@PathParam("property") String property, @QueryParam("mode") @DefaultValue("PERSISTENT") Mode mode, @Suspended AsyncResponse asyncResponse) { validateNamespaceName(property, cluster, namespace); - validateNamespaceOperation(NamespaceName.get(property, namespace), NamespaceOperation.GET_TOPICS); - - // Validate that namespace exists, throws 404 if it doesn't exist - getNamespacePolicies(namespaceName); - - 
pulsar().getNamespaceService().getListOfTopics(namespaceName, mode) + validateNamespaceOperationAsync(NamespaceName.get(property, namespace), NamespaceOperation.GET_TOPICS) + // Validate that namespace exists, throws 404 if it doesn't exist + .thenCompose(__ -> getNamespacePoliciesAsync(namespaceName)) + .thenCompose(policies -> internalGetListOfTopics(policies, mode)) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { log.error("Failed to get topics list for namespace {}", namespaceName, ex); @@ -244,6 +242,22 @@ public Map> getPermissions(@PathParam("property") String return policies.auth_policies.getNamespaceAuthentication(); } + @GET + @Path("/{property}/{cluster}/{namespace}/permissions/subscription") + @ApiOperation(value = "Retrieve the permissions for a subscription.") + @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponse(code = 404, message = "Property or cluster or namespace doesn't exist"), + @ApiResponse(code = 409, message = "Namespace is not empty")}) + public Map> getPermissionOnSubscription(@PathParam("property") String property, + @PathParam("cluster") String cluster, + @PathParam("namespace") String namespace) { + validateNamespaceName(property, cluster, namespace); + validateNamespaceOperation(NamespaceName.get(property, namespace), NamespaceOperation.GET_PERMISSION); + + Policies policies = getNamespacePolicies(namespaceName); + return policies.auth_policies.getSubscriptionAuthentication(); + } + @POST @Path("/{property}/{cluster}/{namespace}/permissions/{role}") @ApiOperation(hidden = true, value = "Grant a new permission to a role on a namespace.") @@ -460,6 +474,18 @@ public void modifyDeduplication(@PathParam("property") String property, @PathPar internalModifyDeduplication(enableDeduplication); } + @GET + @Path("/{property}/{cluster}/{namespace}/autoTopicCreation") + @ApiOperation(value = "Get autoTopicCreation info in a namespace") + @ApiResponses(value = {@ApiResponse(code = 403, 
message = "Don't have admin permission"), + @ApiResponse(code = 404, message = "Tenant or cluster or namespace doesn't exist")}) + public AutoTopicCreationOverride getAutoTopicCreation(@PathParam("property") String property, + @PathParam("cluster") String cluster, + @PathParam("namespace") String namespace) { + validateNamespaceName(property, cluster, namespace); + return internalGetAutoTopicCreation(); + } + @POST @Path("/{property}/{cluster}/{namespace}/autoTopicCreation") @ApiOperation(value = "Override broker's allowAutoTopicCreation setting for a namespace") @@ -521,6 +547,18 @@ public void setAutoSubscriptionCreation( } } + @GET + @Path("/{property}/{cluster}/{namespace}/autoSubscriptionCreation") + @ApiOperation(value = "Get autoSubscriptionCreation info in a namespace") + @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponse(code = 404, message = "Property or cluster or namespace doesn't exist")}) + public AutoSubscriptionCreationOverride getAutoSubscriptionCreation(@PathParam("property") String property, + @PathParam("cluster") String cluster, + @PathParam("namespace") String namespace) { + validateNamespaceName(property, cluster, namespace); + return internalGetAutoSubscriptionCreation(); + } + @DELETE @Path("/{property}/{cluster}/{namespace}/autoSubscriptionCreation") @ApiOperation(value = "Remove override of broker's allowAutoSubscriptionCreation in a namespace") @@ -635,7 +673,8 @@ public void setPublishRate(@PathParam("property") String property, @PathParam("c @GET @Path("/{property}/{cluster}/{namespace}/publishRate") @ApiOperation(hidden = true, - value = "Get publish-rate configured for the namespace, -1 represents not configured yet") + value = "Get publish-rate configured for the namespace, null means publish-rate not configured, " + + "-1 means msg-publish-rate or byte-publish-rate not configured in publish-rate yet") @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin 
permission"), @ApiResponse(code = 404, message = "Namespace does not exist")}) public PublishRate getPublishRate(@PathParam("property") String property, @PathParam("cluster") String cluster, @@ -657,7 +696,8 @@ public void setDispatchRate(@PathParam("property") String property, @PathParam(" @GET @Path("/{property}/{cluster}/{namespace}/dispatchRate") @ApiOperation(hidden = true, - value = "Get dispatch-rate configured for the namespace, -1 represents not configured yet") + value = "Get dispatch-rate configured for the namespace, null means dispatch-rate not configured, " + + "-1 means msg-dispatch-rate or byte-dispatch-rate not configured in dispatch-rate yet") @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), @ApiResponse(code = 404, message = "Namespace does not exist")}) public DispatchRate getDispatchRate(@PathParam("property") String property, @PathParam("cluster") String cluster, @@ -680,8 +720,9 @@ public void setSubscriptionDispatchRate(@PathParam("property") String property, @GET @Path("/{property}/{cluster}/{namespace}/subscriptionDispatchRate") - @ApiOperation(value = - "Get Subscription dispatch-rate configured for the namespace, -1 represents not configured yet") + @ApiOperation(value = "Get subscription dispatch-rate configured for the namespace, null means subscription " + + "dispatch-rate not configured, -1 means msg-dispatch-rate or byte-dispatch-rate not configured " + + "in dispatch-rate yet") @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), @ApiResponse(code = 404, message = "Namespace does not exist")}) public DispatchRate getSubscriptionDispatchRate(@PathParam("property") String property, @@ -706,7 +747,9 @@ public void setReplicatorDispatchRate( @GET @Path("/{tenant}/{cluster}/{namespace}/replicatorDispatchRate") - @ApiOperation(value = "Get replicator dispatch-rate configured for the namespace, -1 represents not configured yet") + @ApiOperation(value = "Get 
replicator dispatch-rate configured for the namespace, null means replicator " + + "dispatch-rate not configured, -1 means msg-dispatch-rate or byte-dispatch-rate not configured " + + "in dispatch-rate yet") @ApiResponses(value = { @ApiResponse(code = 403, message = "Don't have admin permission"), @ApiResponse(code = 404, message = "Namespace does not exist") }) public DispatchRate getReplicatorDispatchRate(@PathParam("tenant") String tenant, @@ -964,6 +1007,18 @@ public void setSubscriptionAuthMode(@PathParam("property") String property, @Pat internalSetSubscriptionAuthMode(subscriptionAuthMode); } + @GET + @Path("/{property}/{cluster}/{namespace}/subscriptionAuthMode") + @ApiOperation(value = "Get subscription auth mode in a namespace") + @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponse(code = 404, message = "Property or cluster or namespace doesn't exist")}) + public SubscriptionAuthMode getSubscriptionAuthMode(@PathParam("property") String property, + @PathParam("cluster") String cluster, + @PathParam("namespace") String namespace) { + validateNamespaceName(property, cluster, namespace); + return internalGetSubscriptionAuthMode(); + } + @POST @Path("/{property}/{cluster}/{namespace}/encryptionRequired") @ApiOperation(hidden = true, value = "Message encryption is required or not for all topics in a namespace") @@ -976,6 +1031,19 @@ public void modifyEncryptionRequired(@PathParam("property") String property, @Pa internalModifyEncryptionRequired(encryptionRequired); } + @GET + @Path("/{property}/{cluster}/{namespace}/encryptionRequired") + @ApiOperation(value = "Get message encryption required status in a namespace") + @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponse(code = 404, message = "Property or cluster or namespace doesn't exist")}) + public Boolean getEncryptionRequired(@PathParam("property") String property, + @PathParam("cluster") String 
cluster, + @PathParam("namespace") String namespace) { + validateAdminAccessForTenant(property); + validateNamespaceName(property, cluster, namespace); + return internalGetEncryptionRequired(); + } + @GET @Path("/{property}/{cluster}/{namespace}/maxProducersPerTopic") @ApiOperation(value = "Get maxProducersPerTopic config on a namespace.") diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java index 2a5ab77909773..68a6c71994211 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/NonPersistentTopics.java @@ -198,6 +198,7 @@ public void getList(@Suspended final AsyncResponse asyncResponse, @PathParam("pr Policies policies = null; NamespaceName nsName = null; try { + validateNamespaceName(property, cluster, namespace); validateNamespaceOperation(namespaceName, NamespaceOperation.GET_TOPICS); policies = getNamespacePolicies(property, cluster, namespace); nsName = NamespaceName.get(property, cluster, namespace); @@ -232,22 +233,19 @@ public void getList(@Suspended final AsyncResponse asyncResponse, @PathParam("pr } } - final List topics = Lists.newArrayList(); - FutureUtil.waitForAll(futures).handle((result, exception) -> { - for (int i = 0; i < futures.size(); i++) { - try { - if (futures.get(i).isDone() && futures.get(i).get() != null) { - topics.addAll(futures.get(i).get()); + FutureUtil.waitForAll(futures).whenComplete((result, ex) -> { + if (ex != null) { + resumeAsyncResponseExceptionally(asyncResponse, ex); + } else { + final List topics = Lists.newArrayList(); + for (int i = 0; i < futures.size(); i++) { + List topicList = futures.get(i).join(); + if (topicList != null) { + topics.addAll(topicList); } - } catch (InterruptedException | ExecutionException e) { - log.error("[{}] Failed to get list of topics under 
namespace {}/{}/{}", clientAppId(), property, - cluster, namespace, e); - asyncResponse.resume(new RestException(e instanceof ExecutionException ? e.getCause() : e)); - return null; } + asyncResponse.resume(topics); } - asyncResponse.resume(topics); - return null; }); } @@ -264,6 +262,7 @@ public List getListFromBundle(@PathParam("property") String property, @P @PathParam("bundle") String bundleRange) { log.info("[{}] list of topics on namespace bundle {}/{}/{}/{}", clientAppId(), property, cluster, namespace, bundleRange); + validateNamespaceName(property, cluster, namespace); validateNamespaceOperation(namespaceName, NamespaceOperation.GET_BUNDLE); Policies policies = getNamespacePolicies(property, cluster, namespace); if (!cluster.equals(Constants.GLOBAL_CLUSTER)) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/PersistentTopics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/PersistentTopics.java index 2917482857363..f3b5e6b56a318 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/PersistentTopics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v1/PersistentTopics.java @@ -69,7 +69,7 @@ public class PersistentTopics extends PersistentTopicsBase { @Path("/{property}/{cluster}/{namespace}") @ApiOperation(hidden = true, value = "Get the list of topics under a namespace.", response = String.class, responseContainer = "List") - @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin or consume permission on namespace"), @ApiResponse(code = 404, message = "Namespace doesn't exist")}) public void getList(@Suspended final AsyncResponse asyncResponse, @PathParam("property") String property, @PathParam("cluster") String cluster, @PathParam("namespace") String namespace) { @@ -87,7 +87,7 @@ public void getList(@Suspended final AsyncResponse asyncResponse, 
@PathParam("pr @Path("/{property}/{cluster}/{namespace}/partitioned") @ApiOperation(hidden = true, value = "Get the list of partitioned topics under a namespace.", response = String.class, responseContainer = "List") - @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin or consume permission on namespace"), @ApiResponse(code = 404, message = "Namespace doesn't exist")}) public List getPartitionedTopicList(@PathParam("property") String property, @PathParam("cluster") String cluster, @PathParam("namespace") String namespace) { @@ -194,7 +194,7 @@ public void createNonPartitionedTopic( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateNamespaceName(tenant, cluster, namespace); validateTopicName(tenant, cluster, namespace, encodedTopic); @@ -229,7 +229,7 @@ public void createNonPartitionedTopic( public void updatePartitionedTopic(@PathParam("property") String property, @PathParam("cluster") String cluster, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("updateLocalTopicOnly") @DefaultValue("false") boolean updateLocalTopicOnly, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @QueryParam("force") @DefaultValue("false") boolean force, int numPartitions) { @@ -264,10 +264,11 @@ public void deletePartitionedTopic(@Suspended final AsyncResponse asyncResponse, @PathParam("property") String property, @PathParam("cluster") String cluster, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("force") @DefaultValue("false") boolean force, - @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { + @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, + @QueryParam("deleteSchema") @DefaultValue("false") boolean deleteSchema) { try { validateTopicName(property, cluster, namespace, encodedTopic); - internalDeletePartitionedTopic(asyncResponse, authoritative, force, false); + internalDeletePartitionedTopic(asyncResponse, authoritative, force, deleteSchema); } catch (WebApplicationException wae) { asyncResponse.resume(wae); } catch (Exception e) { @@ -302,9 +303,10 @@ public void unloadTopic(@Suspended final AsyncResponse asyncResponse, @PathParam public void deleteTopic(@PathParam("property") String property, @PathParam("cluster") String cluster, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("force") @DefaultValue("false") boolean force, - @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { + @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, + @QueryParam("deleteSchema") @DefaultValue("false") boolean deleteSchema) { validateTopicName(property, cluster, namespace, encodedTopic); - internalDeleteTopic(authoritative, force); + internalDeleteTopic(authoritative, force, deleteSchema); } @GET @@ -528,7 +530,7 @@ public void expireTopicMessages( @PathParam("topic") @Encoded String encodedTopic, @ApiParam(value = "Subscription to be Expiry messages on") 
@PathParam("subName") String encodedSubName, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(name = "messageId", value = "messageId to reset back to (ledgerId:entryId)") ResetCursorData resetCursorData) { @@ -636,7 +638,7 @@ public void createSubscription(@Suspended final AsyncResponse asyncResponse, @Pa try { validateTopicName(property, cluster, namespace, topic); if (!topicName.isPersistent()) { - throw new RestException(Response.Status.BAD_REQUEST, "Create subscription on non-persistent topic" + throw new RestException(Response.Status.BAD_REQUEST, "Create subscription on non-persistent topic " + "can only be done through client"); } internalCreateSubscription(asyncResponse, decode(encodedSubName), messageId, authoritative, replicated); @@ -830,7 +832,7 @@ public void getLastMessageId( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, cluster, namespace, encodedTopic); @@ -865,7 +867,7 @@ public void setReplicatedSubscriptionStatus( @PathParam("topic") @Encoded String encodedTopic, @ApiParam(value = "Name of subscription", required = true) @PathParam("subName") String encodedSubName, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Whether to enable replicated subscription", required = true) boolean enabled) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/Bookies.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/Bookies.java index 1af839f4c01dc..05eb1dd94f92d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/Bookies.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/Bookies.java @@ -43,6 +43,7 @@ import org.apache.bookkeeper.discover.RegistrationClient; import org.apache.bookkeeper.meta.MetadataClientDriver; import org.apache.bookkeeper.net.BookieId; +import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.broker.admin.AdminResource; import org.apache.pulsar.broker.web.RestException; import org.apache.pulsar.common.policies.data.BookieInfo; @@ -55,6 +56,7 @@ @Produces(MediaType.APPLICATION_JSON) @Slf4j public class Bookies extends AdminResource { + private static final String PATH_SEPARATOR = "/"; @GET @Path("/racks-info") @@ -162,6 +164,20 @@ public void updateBookieRackInfo(@Suspended final AsyncResponse asyncResponse, throw new RestException(Status.PRECONDITION_FAILED, "Bookie 'group' parameters is missing"); } + // validate rack name + int separatorCnt = StringUtils.countMatches( + StringUtils.strip(bookieInfo.getRack(), PATH_SEPARATOR), PATH_SEPARATOR); + boolean isRackEnabled = pulsar().getConfiguration().isBookkeeperClientRackawarePolicyEnabled(); + boolean isRegionEnabled = pulsar().getConfiguration().isBookkeeperClientRegionawarePolicyEnabled(); + if (isRackEnabled && ((isRegionEnabled && separatorCnt != 1) || (!isRegionEnabled && separatorCnt != 0))) { + asyncResponse.resume(new RestException(Status.PRECONDITION_FAILED, "Bookie 'rack' parameter is invalid, " + + "When `RackawareEnsemblePlacementPolicy` is enabled, the rack name is not allowed to contain " + + 
"slash (`/`) except for the beginning and end of the rack name string. " + + "When `RegionawareEnsemblePlacementPolicy` is enabled, the rack name can only contain " + + "one slash (`/`) except for the beginning and end of the rack name string.")); + return; + } + getPulsarResources().getBookieResources() .update(optionalBookiesRackConfiguration -> { BookiesRackConfiguration brc = optionalBookiesRackConfiguration diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/Namespaces.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/Namespaces.java index 53eab242d3d55..89efd8f83de8d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/Namespaces.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/Namespaces.java @@ -99,12 +99,10 @@ public void getTopics(@PathParam("tenant") String tenant, @QueryParam("mode") @DefaultValue("PERSISTENT") Mode mode, @Suspended AsyncResponse asyncResponse) { validateNamespaceName(tenant, namespace); - validateNamespaceOperation(NamespaceName.get(tenant, namespace), NamespaceOperation.GET_TOPICS); - - // Validate that namespace exists, throws 404 if it doesn't exist - getNamespacePolicies(namespaceName); - - pulsar().getNamespaceService().getListOfTopics(namespaceName, mode) + validateNamespaceOperationAsync(NamespaceName.get(tenant, namespace), NamespaceOperation.GET_TOPICS) + // Validate that namespace exists, throws 404 if it doesn't exist + .thenCompose(__ -> getNamespacePoliciesAsync(namespaceName)) + .thenCompose(policies -> internalGetListOfTopics(policies, mode)) .thenAccept(asyncResponse::resume) .exceptionally(ex -> { log.error("Failed to get topics list for namespace {}", namespaceName, ex); @@ -192,6 +190,21 @@ public Map> getPermissions(@PathParam("tenant") String t return policies.auth_policies.getNamespaceAuthentication(); } + @GET + @Path("/{tenant}/{namespace}/permissions/subscription") + @ApiOperation(value = "Retrieve the permissions for a 
subscription.") + @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponse(code = 404, message = "Tenant or cluster or namespace doesn't exist"), + @ApiResponse(code = 409, message = "Namespace is not empty")}) + public Map> getPermissionOnSubscription(@PathParam("tenant") String tenant, + @PathParam("namespace") String namespace) { + validateNamespaceName(tenant, namespace); + validateNamespaceOperation(NamespaceName.get(tenant, namespace), NamespaceOperation.GET_PERMISSION); + + Policies policies = getNamespacePolicies(namespaceName); + return policies.auth_policies.getSubscriptionAuthentication(); + } + @POST @Path("/{tenant}/{namespace}/permissions/{role}") @ApiOperation(value = "Grant a new permission to a role on a namespace.") @@ -273,7 +286,7 @@ public void setNamespaceReplicationClusters(@PathParam("tenant") String tenant, @GET @Path("/{tenant}/{namespace}/messageTTL") - @ApiOperation(value = "Get the message TTL for the namespace") + @ApiOperation(value = "Get the message TTL for the namespace", response = Integer.class) @ApiResponses(value = { @ApiResponse(code = 403, message = "Don't have admin permission"), @ApiResponse(code = 404, message = "Tenant or cluster or namespace doesn't exist") }) public Integer getNamespaceMessageTTL(@PathParam("tenant") String tenant, @@ -382,6 +395,17 @@ public void removeDeduplication(@PathParam("tenant") String tenant, @PathParam(" internalModifyDeduplication(null); } + @GET + @Path("/{tenant}/{namespace}/autoTopicCreation") + @ApiOperation(value = "Get autoTopicCreation info in a namespace") + @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponse(code = 404, message = "Tenant or namespace doesn't exist")}) + public AutoTopicCreationOverride getAutoTopicCreation(@PathParam("tenant") String tenant, + @PathParam("namespace") String namespace) { + validateNamespaceName(tenant, namespace); + return 
internalGetAutoTopicCreation(); + } + @POST @Path("/{tenant}/{namespace}/autoTopicCreation") @ApiOperation(value = "Override broker's allowAutoTopicCreation setting for a namespace") @@ -443,6 +467,17 @@ public void setAutoSubscriptionCreation( } } + @GET + @Path("/{tenant}/{namespace}/autoSubscriptionCreation") + @ApiOperation(value = "Get autoSubscriptionCreation info in a namespace") + @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponse(code = 404, message = "Tenant or namespace doesn't exist")}) + public AutoSubscriptionCreationOverride getAutoSubscriptionCreation(@PathParam("tenant") String tenant, + @PathParam("namespace") String namespace) { + validateNamespaceName(tenant, namespace); + return internalGetAutoSubscriptionCreation(); + } + @DELETE @Path("/{tenant}/{namespace}/autoSubscriptionCreation") @ApiOperation(value = "Remove override of broker's allowAutoSubscriptionCreation in a namespace") @@ -565,7 +600,8 @@ public void removePublishRate(@PathParam("property") String property, @PathParam @GET @Path("/{property}/{namespace}/publishRate") @ApiOperation(hidden = true, - value = "Get publish-rate configured for the namespace, -1 represents not configured yet") + value = "Get publish-rate configured for the namespace, null means publish-rate not configured, " + + "-1 means msg-publish-rate or byte-publish-rate not configured in publish-rate yet") @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), @ApiResponse(code = 404, message = "Namespace does not exist")}) public PublishRate getPublishRate( @@ -597,7 +633,8 @@ public void deleteDispatchRate(@PathParam("tenant") String tenant, @PathParam("n @GET @Path("/{tenant}/{namespace}/dispatchRate") - @ApiOperation(value = "Get dispatch-rate configured for the namespace, -1 represents not configured yet") + @ApiOperation(value = "Get dispatch-rate configured for the namespace, null means dispatch-rate not configured, " + + 
"-1 means msg-dispatch-rate or byte-dispatch-rate not configured in dispatch-rate yet") @ApiResponses(value = { @ApiResponse(code = 403, message = "Don't have admin permission"), @ApiResponse(code = 404, message = "Namespace does not exist") }) public DispatchRate getDispatchRate(@PathParam("tenant") String tenant, @@ -620,8 +657,9 @@ public void setSubscriptionDispatchRate(@PathParam("tenant") String tenant, @GET @Path("/{tenant}/{namespace}/subscriptionDispatchRate") - @ApiOperation( - value = "Get Subscription dispatch-rate configured for the namespace, -1 represents not configured yet") + @ApiOperation(value = "Get subscription dispatch-rate configured for the namespace, null means subscription " + + "dispatch-rate not configured, -1 means msg-dispatch-rate or byte-dispatch-rate not configured " + + "in dispatch-rate yet") @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), @ApiResponse(code = 404, message = "Namespace does not exist")}) public DispatchRate getSubscriptionDispatchRate(@PathParam("tenant") String tenant, @@ -695,7 +733,9 @@ public void setReplicatorDispatchRate(@PathParam("tenant") String tenant, @GET @Path("/{tenant}/{namespace}/replicatorDispatchRate") - @ApiOperation(value = "Get replicator dispatch-rate configured for the namespace, -1 represents not configured yet") + @ApiOperation(value = "Get replicator dispatch-rate configured for the namespace, null means replicator " + + "dispatch-rate not configured, -1 means msg-dispatch-rate or byte-dispatch-rate not configured " + + "in dispatch-rate yet") @ApiResponses(value = { @ApiResponse(code = 403, message = "Don't have admin permission"), @ApiResponse(code = 404, message = "Namespace does not exist") }) public DispatchRate getReplicatorDispatchRate(@PathParam("tenant") String tenant, @@ -972,6 +1012,17 @@ public void setSubscriptionAuthMode(@PathParam("tenant") String tenant, internalSetSubscriptionAuthMode(subscriptionAuthMode); } + @GET + 
@Path("/{tenant}/{namespace}/subscriptionAuthMode") + @ApiOperation(value = "Get subscription auth mode in a namespace") + @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponse(code = 404, message = "Tenant or namespace doesn't exist")}) + public SubscriptionAuthMode getSubscriptionAuthMode(@PathParam("tenant") String tenant, + @PathParam("namespace") String namespace) { + validateNamespaceName(tenant, namespace); + return internalGetSubscriptionAuthMode(); + } + @POST @Path("/{tenant}/{namespace}/encryptionRequired") @ApiOperation(value = "Message encryption is required or not for all topics in a namespace") @@ -987,6 +1038,17 @@ public void modifyEncryptionRequired( internalModifyEncryptionRequired(encryptionRequired); } + @GET + @Path("/{tenant}/{namespace}/encryptionRequired") + @ApiOperation(value = "Get message encryption required status in a namespace") + @ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponse(code = 404, message = "Tenant or namespace doesn't exist")}) + public Boolean getEncryptionRequired(@PathParam("tenant") String tenant, + @PathParam("namespace") String namespace) { + validateNamespaceName(tenant, namespace); + return internalGetEncryptionRequired(); + } + @GET @Path("/{tenant}/{namespace}/delayedDelivery") @ApiOperation(value = "Get delayed delivery messages config on a namespace.") @@ -1626,9 +1688,10 @@ public void setSubscriptionTypesEnabled( @ApiResponses(value = { @ApiResponse(code = 403, message = "Don't have admin permission"), @ApiResponse(code = 404, message = "Tenants or Namespace doesn't exist") }) public boolean getSchemaValidtionEnforced(@PathParam("tenant") String tenant, - @PathParam("namespace") String namespace) { + @PathParam("namespace") String namespace, + @QueryParam("applied") @DefaultValue("false") boolean applied) { validateNamespaceName(tenant, namespace); - return internalGetSchemaValidationEnforced(); + return 
internalGetSchemaValidationEnforced(applied); } @POST diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java index fa52cb4599714..18f3a2dc1c9b5 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/NonPersistentTopics.java @@ -94,7 +94,7 @@ public PartitionedTopicMetadata getPartitionedMetadata( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Is check configuration required to automatically create topic") @QueryParam("checkAllowAutoCreation") @DefaultValue("false") boolean checkAllowAutoCreation) { @@ -152,7 +152,7 @@ public PersistentTopicInternalStats getInternalStats( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @QueryParam("metadata") @DefaultValue("false") boolean metadata) { validateTopicName(tenant, namespace, encodedTopic); @@ -231,7 +231,7 @@ public void getPartitionedStats( @PathParam("topic") @Encoded String encodedTopic, @ApiParam(value = "Get per partition stats") @QueryParam("perPartition") @DefaultValue("true") boolean perPartition, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "If return precise backlog or imprecise backlog") @QueryParam("getPreciseBacklog") @DefaultValue("false") boolean getPreciseBacklog, @@ -340,7 +340,7 @@ public void unloadTopic( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, namespace, encodedTopic); @@ -403,26 +403,23 @@ public void getList( } } - final List topics = Lists.newArrayList(); - FutureUtil.waitForAll(futures).handle((result, exception) -> { - for (int i = 0; i < futures.size(); i++) { - try { - if (futures.get(i).isDone() && futures.get(i).get() != null) { - topics.addAll(futures.get(i).get()); + FutureUtil.waitForAll(futures).whenComplete((result, ex) -> { + if (ex != null) { + resumeAsyncResponseExceptionally(asyncResponse, ex); + } else { + final List topics = Lists.newArrayList(); + for (int i = 0; i < futures.size(); i++) { + List topicList = futures.get(i).join(); + if (topicList != null) { + topics.addAll(topicList); } - } catch (InterruptedException | ExecutionException e) { - log.error("[{}] Failed to get list of topics under namespace {}", clientAppId(), namespaceName, e); - asyncResponse.resume(new RestException(e instanceof ExecutionException ? e.getCause() : e)); - return null; } + final List nonPersistentTopics = + topics.stream() + .filter(name -> !TopicName.get(name).isPersistent()) + .collect(Collectors.toList()); + asyncResponse.resume(nonPersistentTopics); } - - final List nonPersistentTopics = - topics.stream() - .filter(name -> !TopicName.get(name).isPersistent()) - .collect(Collectors.toList()); - asyncResponse.resume(nonPersistentTopics); - return null; }); } @@ -520,7 +517,7 @@ public void truncateTopic( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative){ asyncResponse.resume(new RestException(Status.PRECONDITION_FAILED.getStatusCode(), "unsupport truncate")); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java index 09b694cc34e02..8e178da303c9c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java @@ -85,7 +85,7 @@ public class PersistentTopics extends PersistentTopicsBase { response = String.class, responseContainer = "List") @ApiResponses(value = { @ApiResponse(code = 401, message = "Don't have permission to administrate resources on this tenant"), - @ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponse(code = 403, message = "Don't have admin or consume permission on namespace"), @ApiResponse(code = 404, message = "tenant/namespace/topic doesn't exit"), @ApiResponse(code = 412, message = "Namespace name is not valid"), @ApiResponse(code = 500, message = "Internal server error")}) @@ -111,7 +111,7 @@ public void getList( response = String.class, responseContainer = "List") @ApiResponses(value = { @ApiResponse(code = 401, message = "Don't have permission to administrate resources on this tenant"), - @ApiResponse(code = 403, message = "Don't have admin permission"), + @ApiResponse(code = 403, message = "Don't have admin or consume permission on namespace"), @ApiResponse(code = 404, message = "tenant/namespace/topic doesn't exit"), @ApiResponse(code = 412, message = "Namespace name is not valid"), @ApiResponse(code = 500, message = "Internal server error")}) @@ -234,6 +234,7 @@ public void createPartitionedTopic( validateGlobalNamespaceOwnership(); validatePartitionedTopicName(tenant, namespace, encodedTopic); 
validateTopicPolicyOperation(topicName, PolicyName.PARTITION, PolicyOperation.WRITE); + validateCreateTopic(topicName); internalCreatePartitionedTopic(asyncResponse, numPartitions, createLocalTopicOnly); } catch (Exception e) { log.error("[{}] Failed to create partitioned topic {}", clientAppId(), topicName, e); @@ -262,11 +263,12 @@ public void createNonPartitionedTopic( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateNamespaceName(tenant, namespace); validateGlobalNamespaceOwnership(); validateTopicName(tenant, namespace, encodedTopic); + validateCreateTopic(topicName); internalCreateNonPartitionedTopic(authoritative); } @@ -281,7 +283,7 @@ public void getOffloadPolicies(@Suspended final AsyncResponse asyncResponse, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -302,7 +304,7 @@ public void setOffloadPolicies(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Offload policies for the specified topic") OffloadPoliciesImpl offloadPolicies) { validateTopicName(tenant, namespace, encodedTopic); @@ -324,7 +326,7 @@ public void removeOffloadPolicies(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -347,7 +349,7 @@ public void getMaxUnackedMessagesOnConsumer(@Suspended final AsyncResponse async @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -368,7 +370,7 @@ public void setMaxUnackedMessagesOnConsumer( @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Max unacked messages on consumer policies for the specified topic") Integer maxUnackedNum) { @@ -391,7 +393,7 @@ public void deleteMaxUnackedMessagesOnConsumer(@Suspended final AsyncResponse as @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -413,7 +415,7 @@ public void getDeduplicationSnapshotInterval(@Suspended final AsyncResponse asyn @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -440,7 +442,7 @@ public void setDeduplicationSnapshotInterval( @PathParam("topic") @Encoded String encodedTopic, @ApiParam(value = "Interval to take deduplication snapshot for the specified topic") Integer interval, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -461,7 +463,7 @@ public void deleteDeduplicationSnapshotInterval(@Suspended final AsyncResponse a @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -484,7 +486,7 @@ public void getInactiveTopicPolicies(@Suspended final AsyncResponse asyncRespons @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -504,7 +506,7 @@ public void setInactiveTopicPolicies(@Suspended final AsyncResponse asyncRespons @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "inactive topic policies for the specified topic") InactiveTopicPolicies inactiveTopicPolicies) { @@ -527,7 +529,7 @@ public void deleteInactiveTopicPolicies(@Suspended final AsyncResponse asyncResp @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -550,7 +552,7 @@ public void getMaxUnackedMessagesOnSubscription(@Suspended final AsyncResponse a @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -572,7 +574,7 @@ public void setMaxUnackedMessagesOnSubscription( @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Max unacked messages on subscription policies for the specified topic") Integer maxUnackedNum) { @@ -598,7 +600,7 @@ public void deleteMaxUnackedMessagesOnSubscription(@Suspended final AsyncRespons @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); validateTopicPolicyOperation(topicName, PolicyName.MAX_UNACKED, PolicyOperation.WRITE); @@ -622,7 +624,7 @@ public void getDelayedDeliveryPolicies(@Suspended final AsyncResponse asyncRespo @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -644,7 +646,7 @@ public void setDelayedDeliveryPolicies( @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Delayed delivery policies for the specified topic") DelayedDeliveryPolicies deliveryPolicies) { @@ -671,7 +673,7 @@ public void deleteDelayedDeliveryPolicies(@Suspended final AsyncResponse asyncRe @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); validatePoliciesReadOnlyAccess(); @@ -723,7 +725,7 @@ public void updatePartitionedTopic( @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, @QueryParam("updateLocalTopicOnly") @DefaultValue("false") boolean updateLocalTopicOnly, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @QueryParam("force") @DefaultValue("false") boolean force, @ApiParam(value = "The number of partitions for the topic", @@ -784,7 +786,7 @@ public PartitionedTopicMetadata getPartitionedMetadata( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Is check configuration required to automatically create topic") @QueryParam("checkAllowAutoCreation") @DefaultValue("false") boolean checkAllowAutoCreation) { @@ -816,7 +818,7 @@ public void deletePartitionedTopic( @ApiParam(value = "Stop all producer/consumer/replicator and delete topic forcefully", defaultValue = "false", type = "boolean") @QueryParam("force") @DefaultValue("false") boolean force, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Delete the topic's schema storage") @QueryParam("deleteSchema") @DefaultValue("false") boolean deleteSchema) { @@ -849,7 +851,7 @@ public void unloadTopic( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, namespace, encodedTopic); @@ -884,7 +886,7 @@ public void deleteTopic( @ApiParam(value = "Stop all producer/consumer/replicator and delete topic forcefully", defaultValue = "false", type = "boolean") @QueryParam("force") @DefaultValue("false") boolean force, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Delete the topic's schema storage") @QueryParam("deleteSchema") @DefaultValue("false") boolean deleteSchema) { @@ -912,7 +914,7 @@ public void getSubscriptions( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, namespace, encodedTopic); @@ -942,7 +944,7 @@ public TopicStats getStats( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "If return precise backlog or imprecise backlog") @QueryParam("getPreciseBacklog") @DefaultValue("false") boolean getPreciseBacklog, @@ -971,7 +973,7 @@ public PersistentTopicInternalStats getInternalStats( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @QueryParam("metadata") @DefaultValue("false") boolean metadata) { validateTopicName(tenant, namespace, encodedTopic); @@ -993,7 +995,7 @@ public void getManagedLedgerInfo( @PathParam("tenant") String tenant, @ApiParam(value = "Specify the namespace", required = true) @PathParam("namespace") String namespace, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @@ -1024,7 +1026,7 @@ public void getPartitionedStats( @PathParam("topic") @Encoded String encodedTopic, @ApiParam(value = "Get per partition stats") @QueryParam("perPartition") @DefaultValue("true") boolean perPartition, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "If return precise backlog or imprecise backlog") @QueryParam("getPreciseBacklog") @DefaultValue("false") boolean getPreciseBacklog, @@ -1061,7 +1063,7 @@ public void getPartitionedStatsInternal( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, namespace, encodedTopic); @@ -1100,7 +1102,7 @@ public void deleteSubscription( @ApiParam(value = "Disconnect and close all consumers and delete subscription forcefully", defaultValue = "false", type = "boolean") @QueryParam("force") @DefaultValue("false") boolean force, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, namespace, encodedTopic); @@ -1137,7 +1139,7 @@ public void skipAllMessages( @PathParam("topic") @Encoded String encodedTopic, @ApiParam(value = "Name of subscription") @PathParam("subName") String encodedSubName, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, namespace, encodedTopic); @@ -1172,7 +1174,7 @@ public void skipMessages( @PathParam("subName") String encodedSubName, @ApiParam(value = "The number of messages to skip", defaultValue = "0") @PathParam("numMessages") int numMessages, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); internalSkipMessages(decode(encodedSubName), numMessages, authoritative); @@ -1202,7 +1204,7 @@ public void expireTopicMessages( @PathParam("subName") String encodedSubName, @ApiParam(value = "Expires beyond the specified number of seconds", defaultValue = "0") @PathParam("expireTimeInSeconds") int expireTimeInSeconds, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, namespace, encodedTopic); @@ -1237,7 +1239,7 @@ public void expireTopicMessages( @PathParam("topic") @Encoded String encodedTopic, @ApiParam(value = "Subscription to be Expiry messages on") @PathParam("subName") String encodedSubName, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(name = "messageId", value = "messageId to reset back to (ledgerId:entryId)") ResetCursorData resetCursorData) { @@ -1277,7 +1279,7 @@ public void expireMessagesForAllSubscriptions( @PathParam("topic") @Encoded String encodedTopic, @ApiParam(value = "Expires beyond the specified number of seconds", defaultValue = "0") @PathParam("expireTimeInSeconds") int expireTimeInSeconds, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, namespace, encodedTopic); @@ -1313,7 +1315,7 @@ public void createSubscription( @PathParam("topic") @Encoded String topic, @ApiParam(value = "Subscription to create position on", required = true) @PathParam("subscriptionName") String encodedSubName, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(name = "messageId", value = "messageId where to create the subscription. " + "It can be 'latest', 'earliest' or (ledgerId:entryId)", @@ -1327,7 +1329,7 @@ public void createSubscription( try { validateTopicName(tenant, namespace, topic); if (!topicName.isPersistent()) { - throw new RestException(Response.Status.BAD_REQUEST, "Create subscription on non-persistent topic" + throw new RestException(Response.Status.BAD_REQUEST, "Create subscription on non-persistent topic " + "can only be done through client"); } internalCreateSubscription(asyncResponse, decode(encodedSubName), messageId, authoritative, replicated); @@ -1366,7 +1368,7 @@ public void resetCursor( @PathParam("subName") String encodedSubName, @ApiParam(value = "the timestamp to reset back") @PathParam("timestamp") long timestamp, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, namespace, encodedTopic); @@ -1402,7 +1404,7 @@ public void resetCursorOnPosition( @PathParam("topic") @Encoded String encodedTopic, @ApiParam(name = "subName", value = "Subscription to reset position on", required = true) @PathParam("subName") String encodedSubName, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(name = "messageId", value = "messageId to reset back to (ledgerId:entryId)") ResetCursorData resetCursorData) { @@ -1441,7 +1443,7 @@ public Response peekNthMessage( @PathParam("subName") String encodedSubName, @ApiParam(value = "The number of messages (default 1)", defaultValue = "1") @PathParam("messagePosition") int messagePosition, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); return internalPeekNthMessage(decode(encodedSubName), messagePosition, authoritative); @@ -1473,7 +1475,7 @@ public Response examineMessage( @QueryParam("initialPosition") String initialPosition, @ApiParam(value = "The position of messages (default 1)", defaultValue = "1") @QueryParam("messagePosition") long messagePosition, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); return internalExamineMessage(initialPosition, messagePosition, authoritative); @@ -1504,7 +1506,7 @@ public void getMessageById( @PathParam("ledgerId") long ledgerId, @ApiParam(value = "The entry id", required = true) @PathParam("entryId") long entryId, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, namespace, encodedTopic); @@ -1539,7 +1541,7 @@ public void getMessageIdByTimestamp( @PathParam("topic") @Encoded String encodedTopic, @ApiParam(value = "Specify the timestamp", required = true) @PathParam("timestamp") long timestamp, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); internalGetMessageIdByTimestamp(timestamp, authoritative) @@ -1572,7 +1574,7 @@ public PersistentOfflineTopicStats getBacklog( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); return internalGetBacklog(authoritative); @@ -1595,7 +1597,7 @@ public void getBacklogSizeByMessageId( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, MessageIdImpl messageId) { validateTopicName(tenant, namespace, encodedTopic); internalGetBacklogSizeByMessageId(asyncResponse, messageId, authoritative); @@ -1614,7 +1616,7 @@ public void getBacklogQuotaMap( @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -1640,7 +1642,7 @@ public void setBacklogQuota( @Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @QueryParam("backlogQuotaType") BacklogQuotaType backlogQuotaType, BacklogQuotaImpl backlogQuota) { validateTopicName(tenant, namespace, encodedTopic); @@ -1665,7 +1667,7 @@ public void removeBacklogQuota(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("backlogQuotaType") BacklogQuotaType backlogQuotaType, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -1689,7 +1691,7 @@ public void getMessageTTL(@Suspended final AsyncResponse asyncResponse, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -1725,7 +1727,7 @@ public void setMessageTTL(@Suspended final AsyncResponse asyncResponse, @PathParam("topic") @Encoded String encodedTopic, @ApiParam(value = "TTL in seconds for the specified namespace", required = true) @QueryParam("messageTTL") Integer messageTTL, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -1751,7 +1753,7 @@ public void removeMessageTTL(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -1776,7 +1778,7 @@ public void getDeduplication(@Suspended final AsyncResponse asyncResponse, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -1800,7 +1802,7 @@ public void setDeduplication( @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "DeduplicationEnabled policies for the specified topic") Boolean enabled) { @@ -1826,7 +1828,7 @@ public void removeDeduplication(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -1851,7 +1853,7 @@ public void getRetention(@Suspended final AsyncResponse asyncResponse, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -1876,7 +1878,7 @@ public void setRetention(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Retention policies for the specified namespace") RetentionPolicies retention) { validateTopicName(tenant, namespace, encodedTopic); @@ -1912,7 +1914,7 @@ public void removeRetention(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -1943,7 +1945,7 @@ public void getPersistence(@Suspended final AsyncResponse asyncResponse, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -1968,7 +1970,7 @@ public void setPersistence(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Bookkeeper persistence policies for specified topic") PersistencePolicies persistencePolicies) { @@ -2005,7 +2007,7 @@ public void removePersistence(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2035,7 +2037,7 @@ public void getMaxSubscriptionsPerTopic(@Suspended final AsyncResponse asyncResp @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2061,7 +2063,7 @@ public void setMaxSubscriptionsPerTopic(@Suspended final AsyncResponse asyncResp @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "The max subscriptions of the topic") int maxSubscriptionsPerTopic) { validateTopicName(tenant, namespace, encodedTopic); @@ -2091,7 +2093,7 @@ public void removeMaxSubscriptionsPerTopic(@Suspended final AsyncResponse asyncR @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2120,7 +2122,7 @@ public void getReplicatorDispatchRate(@Suspended final AsyncResponse asyncRespon @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2145,7 +2147,7 @@ public void setReplicatorDispatchRate(@Suspended final AsyncResponse asyncRespon @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Replicator dispatch rate of the topic") DispatchRateImpl dispatchRate) { validateTopicName(tenant, namespace, encodedTopic); @@ -2175,7 +2177,7 @@ public void removeReplicatorDispatchRate(@Suspended final AsyncResponse asyncRes @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2204,7 +2206,7 @@ public void getMaxProducers(@Suspended final AsyncResponse asyncResponse, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2229,7 +2231,7 @@ public void setMaxProducers(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "The max producers of the topic") int maxProducers) { validateTopicName(tenant, namespace, encodedTopic); @@ -2261,7 +2263,7 @@ public void removeMaxProducers(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2292,7 +2294,7 @@ public void getMaxConsumers(@Suspended final AsyncResponse asyncResponse, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2317,7 +2319,7 @@ public void setMaxConsumers(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "The max consumers of the topic") int maxConsumers) { validateTopicName(tenant, namespace, encodedTopic); @@ -2349,7 +2351,7 @@ public void removeMaxConsumers(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2379,7 +2381,7 @@ public void getMaxMessageSize(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2406,7 +2408,7 @@ public void setMaxMessageSize(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "The max message size of the topic") int maxMessageSize) { validateTopicName(tenant, namespace, encodedTopic); @@ -2438,7 +2440,7 @@ public void removeMaxMessageSize(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2479,7 +2481,7 @@ public MessageId terminate( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validatePersistentTopicName(tenant, namespace, encodedTopic); return internalTerminate(authoritative); @@ -2505,7 +2507,8 @@ public void terminatePartitionedTopic(@Suspended final AsyncResponse asyncRespon @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker." 
+ + " For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); internalTerminatePartitionedTopic(asyncResponse, authoritative); @@ -2533,7 +2536,7 @@ public void compact( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, namespace, encodedTopic); @@ -2565,7 +2568,7 @@ public LongRunningProcessStatus compactionStatus( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); return internalCompactionStatus(authoritative); @@ -2593,7 +2596,7 @@ public void triggerOffload( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, MessageIdImpl messageId) { if (messageId == null) { @@ -2623,7 +2626,7 @@ public OffloadProcessStatus offloadStatus( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); return internalOffloadStatus(authoritative); @@ -2650,7 +2653,7 @@ public void getLastMessageId( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { try { validateTopicName(tenant, namespace, encodedTopic); @@ -2673,7 +2676,7 @@ public void getDispatchRate(@Suspended final AsyncResponse asyncResponse, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2697,7 +2700,7 @@ public void setDispatchRate(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Dispatch rate for the specified topic") DispatchRateImpl dispatchRate) { validateTopicName(tenant, namespace, encodedTopic); @@ -2733,7 +2736,7 @@ public void removeDispatchRate(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2765,7 +2768,7 @@ public void getSubscriptionDispatchRate(@Suspended final AsyncResponse asyncResp @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2790,7 +2793,7 @@ public void setSubscriptionDispatchRate( @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Subscription message dispatch rate for the specified topic") DispatchRateImpl dispatchRate) { @@ -2827,7 +2830,7 @@ public void removeSubscriptionDispatchRate(@Suspended final AsyncResponse asyncR @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2859,7 +2862,7 @@ public void getCompactionThreshold(@Suspended final AsyncResponse asyncResponse, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2883,7 +2886,7 @@ public void setCompactionThreshold(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Dispatch rate for the specified topic") long compactionThreshold) { validateTopicName(tenant, namespace, encodedTopic); @@ -2919,7 +2922,7 @@ public void removeCompactionThreshold(@Suspended final AsyncResponse asyncRespon @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2950,7 +2953,7 @@ public void getMaxConsumersPerSubscription(@Suspended final AsyncResponse asyncR @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -2976,7 +2979,7 @@ public void setMaxConsumersPerSubscription( @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Dispatch rate for the specified topic") int maxConsumersPerSubscription) { validateTopicName(tenant, namespace, encodedTopic); @@ -3012,7 +3015,7 @@ public void removeMaxConsumersPerSubscription(@Suspended final AsyncResponse asy @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -3044,7 +3047,7 @@ public void getPublishRate(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -3069,7 +3072,7 @@ public void setPublishRate(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Dispatch rate for the specified topic") PublishRate publishRate) { validateTopicName(tenant, namespace, encodedTopic); @@ -3105,7 +3108,7 @@ public void removePublishRate(@Suspended final AsyncResponse asyncResponse, @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -3136,7 +3139,7 @@ public void getSubscriptionTypesEnabled(@Suspended final AsyncResponse asyncResp @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -3163,7 +3166,7 @@ public void setSubscriptionTypesEnabled(@Suspended final AsyncResponse asyncResp @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Enable sub types for the specified topic") Set subscriptionTypesEnabled) { @@ -3201,7 +3204,7 @@ public void getSubscribeRate(@Suspended final AsyncResponse asyncResponse, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @QueryParam("applied") boolean applied, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative) { validateTopicName(tenant, namespace, encodedTopic); preValidation(authoritative) @@ -3225,7 +3228,7 @@ public void setSubscribeRate( @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Subscribe rate for the specified topic") SubscribeRate subscribeRate) { validateTopicName(tenant, namespace, encodedTopic); @@ -3299,7 +3302,7 @@ public void truncateTopic( @PathParam("namespace") String namespace, @ApiParam(value = "Specify topic name", required = true) @PathParam("topic") @Encoded String encodedTopic, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative){ validateTopicName(tenant, namespace, encodedTopic); internalTruncateTopic(asyncResponse, authoritative); @@ -3329,7 +3332,7 @@ public void setReplicatedSubscriptionStatus( @PathParam("topic") @Encoded String encodedTopic, @ApiParam(value = "Name of subscription", required = true) @PathParam("subName") String encodedSubName, - @ApiParam(value = "Is authentication required to perform this operation") + @ApiParam(value = "Whether leader broker redirected this call to this broker. 
For internal use.") @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, @ApiParam(value = "Whether to enable replicated subscription", required = true) boolean enabled) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v3/Packages.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v3/Packages.java index 9d6e5eedd24a5..03a83bf314520 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v3/Packages.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v3/Packages.java @@ -58,7 +58,8 @@ public class Packages extends PackagesBase { @ApiResponse(code = 200, message = "Return the metadata of the specified package."), @ApiResponse(code = 404, message = "The specified package is not existent."), @ApiResponse(code = 412, message = "The package name is illegal."), - @ApiResponse(code = 500, message = "Internal server error.") + @ApiResponse(code = 500, message = "Internal server error."), + @ApiResponse(code = 503, message = "Package Management Service is not enabled in the broker.") } ) public void getMeta( @@ -82,7 +83,8 @@ public void getMeta( @ApiResponse(code = 200, message = "Update the metadata of the specified package successfully."), @ApiResponse(code = 404, message = "The specified package is not existent."), @ApiResponse(code = 412, message = "The package name is illegal."), - @ApiResponse(code = 500, message = "Internal server error.") + @ApiResponse(code = 500, message = "Internal server error."), + @ApiResponse(code = 503, message = "Package Management Service is not enabled in the broker.") } ) @Consumes(MediaType.APPLICATION_JSON) @@ -113,7 +115,8 @@ public void updateMeta( value = { @ApiResponse(code = 200, message = "Upload the specified package successfully."), @ApiResponse(code = 412, message = "The package name is illegal."), - @ApiResponse(code = 500, message = "Internal server error.") + @ApiResponse(code = 500, message = "Internal server error."), + 
@ApiResponse(code = 503, message = "Package Management Service is not enabled in the broker.") } ) @Consumes(MediaType.MULTIPART_FORM_DATA) @@ -148,7 +151,8 @@ public void upload( @ApiResponse(code = 200, message = "Download the specified package successfully."), @ApiResponse(code = 404, message = "The specified package is not existent."), @ApiResponse(code = 412, message = "The package name is illegal."), - @ApiResponse(code = 500, message = "Internal server error.") + @ApiResponse(code = 500, message = "Internal server error."), + @ApiResponse(code = 503, message = "Package Management Service is not enabled in the broker.") } ) public StreamingOutput download( @@ -168,7 +172,8 @@ public StreamingOutput download( @ApiResponse(code = 200, message = "Delete the specified package successfully."), @ApiResponse(code = 404, message = "The specified package is not existent."), @ApiResponse(code = 412, message = "The package name is illegal."), - @ApiResponse(code = 500, message = "Internal server error.") + @ApiResponse(code = 500, message = "Internal server error."), + @ApiResponse(code = 503, message = "Package Management Service is not enabled in the broker.") } ) @ApiOperation(value = "Delete a package with the package name.") @@ -195,7 +200,8 @@ public void delete( @ApiResponse(code = 200, message = "Return the package versions of the specified package."), @ApiResponse(code = 404, message = "The specified package is not existent."), @ApiResponse(code = 412, message = "The package name is illegal."), - @ApiResponse(code = 500, message = "Internal server error.") + @ApiResponse(code = 500, message = "Internal server error."), + @ApiResponse(code = 503, message = "Package Management Service is not enabled in the broker.") } ) public void listPackageVersion( @@ -219,7 +225,8 @@ public void listPackageVersion( @ApiResponse(code = 200, message = "Return all the specified type package names in the specified namespace."), @ApiResponse(code = 412, message = "The package type 
is illegal."), - @ApiResponse(code = 500, message = "Internal server error.") + @ApiResponse(code = 500, message = "Internal server error."), + @ApiResponse(code = 503, message = "Package Management Service is not enabled in the broker.") } ) public void listPackages( diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v3/Transactions.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v3/Transactions.java index 94411f6d16df8..9cb825b9f8e1f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v3/Transactions.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v3/Transactions.java @@ -18,6 +18,9 @@ */ package org.apache.pulsar.broker.admin.v3; +import static javax.ws.rs.core.Response.Status.METHOD_NOT_ALLOWED; +import static javax.ws.rs.core.Response.Status.NOT_FOUND; +import static javax.ws.rs.core.Response.Status.SERVICE_UNAVAILABLE; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiResponse; @@ -33,14 +36,17 @@ import javax.ws.rs.container.AsyncResponse; import javax.ws.rs.container.Suspended; import javax.ws.rs.core.MediaType; +import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.admin.impl.TransactionsBase; -import org.apache.pulsar.common.naming.TopicDomain; -import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.broker.service.BrokerServiceException; +import org.apache.pulsar.broker.web.RestException; +import org.apache.pulsar.common.util.FutureUtil; @Path("/transactions") @Produces(MediaType.APPLICATION_JSON) @Consumes(MediaType.APPLICATION_JSON) @Api(value = "/transactions", description = "Transactions admin apis", tags = "transactions") +@Slf4j public class Transactions extends TransactionsBase { @GET @@ -55,6 +61,7 @@ public void getCoordinatorStats(@Suspended final AsyncResponse asyncResponse, @QueryParam("authoritative") @DefaultValue("false") boolean authoritative, 
@QueryParam("coordinatorId") Integer coordinatorId) { + checkTransactionCoordinatorEnabled(); internalGetCoordinatorStats(asyncResponse, authoritative, coordinatorId); } @@ -76,9 +83,19 @@ public void getTransactionInBufferStats(@Suspended final AsyncResponse asyncResp @PathParam("topic") @Encoded String encodedTopic, @PathParam("mostSigBits") String mostSigBits, @PathParam("leastSigBits") String leastSigBits) { - validateTopicName(tenant, namespace, encodedTopic); - internalGetTransactionInBufferStats(asyncResponse, authoritative, - Long.parseLong(mostSigBits), Long.parseLong(leastSigBits)); + try { + checkTransactionCoordinatorEnabled(); + validateTopicName(tenant, namespace, encodedTopic); + internalGetTransactionInBufferStats(authoritative, Long.parseLong(mostSigBits), + Long.parseLong(leastSigBits)) + .thenAccept(stat -> asyncResponse.resume(stat)) + .exceptionally(ex -> { + resumeAsyncResponseExceptionally(asyncResponse, ex); + return null; + }); + } catch (Exception ex) { + resumeAsyncResponseExceptionally(asyncResponse, ex); + } } @GET @@ -100,9 +117,19 @@ public void getTransactionInPendingAckStats(@Suspended final AsyncResponse async @PathParam("mostSigBits") String mostSigBits, @PathParam("leastSigBits") String leastSigBits, @PathParam("subName") String subName) { - validateTopicName(tenant, namespace, encodedTopic); - internalGetTransactionInPendingAckStats(asyncResponse, authoritative, Long.parseLong(mostSigBits), - Long.parseLong(leastSigBits), subName); + try { + checkTransactionCoordinatorEnabled(); + validateTopicName(tenant, namespace, encodedTopic); + internalGetTransactionInPendingAckStats(authoritative, Long.parseLong(mostSigBits), + Long.parseLong(leastSigBits), subName) + .thenAccept(stat -> asyncResponse.resume(stat)) + .exceptionally(ex -> { + resumeAsyncResponseExceptionally(asyncResponse, ex); + return null; + }); + } catch (Exception ex) { + resumeAsyncResponseExceptionally(asyncResponse, ex); + } } @GET @@ -121,8 +148,18 @@ public void 
getTransactionBufferStats(@Suspended final AsyncResponse asyncRespon @PathParam("tenant") String tenant, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic) { - validateTopicName(tenant, namespace, encodedTopic); - internalGetTransactionBufferStats(asyncResponse, authoritative); + try { + checkTransactionCoordinatorEnabled(); + validateTopicName(tenant, namespace, encodedTopic); + internalGetTransactionBufferStats(authoritative) + .thenAccept(stat -> asyncResponse.resume(stat)) + .exceptionally(ex -> { + resumeAsyncResponseExceptionally(asyncResponse, ex); + return null; + }); + } catch (Exception ex) { + resumeAsyncResponseExceptionally(asyncResponse, ex); + } } @GET @@ -142,8 +179,18 @@ public void getPendingAckStats(@Suspended final AsyncResponse asyncResponse, @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic, @PathParam("subName") String subName) { - validateTopicName(tenant, namespace, encodedTopic); - internalGetPendingAckStats(asyncResponse, authoritative, subName); + try { + checkTransactionCoordinatorEnabled(); + validateTopicName(tenant, namespace, encodedTopic); + internalGetPendingAckStats(authoritative, subName) + .thenAccept(stats -> asyncResponse.resume(stats)) + .exceptionally(ex -> { + resumeAsyncResponseExceptionally(asyncResponse, ex); + return null; + }); + } catch (Exception ex) { + resumeAsyncResponseExceptionally(asyncResponse, ex); + } } @GET @@ -162,6 +209,7 @@ public void getTransactionMetadata(@Suspended final AsyncResponse asyncResponse, @DefaultValue("false") boolean authoritative, @PathParam("mostSigBits") String mostSigBits, @PathParam("leastSigBits") String leastSigBits) { + checkTransactionCoordinatorEnabled(); internalGetTransactionMetadata(asyncResponse, authoritative, Integer.parseInt(mostSigBits), Long.parseLong(leastSigBits)); } @@ -182,6 +230,7 @@ public void getSlowTransactions(@Suspended final AsyncResponse asyncResponse, @DefaultValue("false") 
boolean authoritative, @PathParam("timeout") String timeout, @QueryParam("coordinatorId") Integer coordinatorId) { + checkTransactionCoordinatorEnabled(); internalGetSlowTransactions(asyncResponse, authoritative, Long.parseLong(timeout), coordinatorId); } @@ -199,6 +248,7 @@ public void getCoordinatorInternalStats(@Suspended final AsyncResponse asyncResp @DefaultValue("false") boolean authoritative, @PathParam("coordinatorId") String coordinatorId, @QueryParam("metadata") @DefaultValue("false") boolean metadata) { + checkTransactionCoordinatorEnabled(); internalGetCoordinatorInternalStats(asyncResponse, authoritative, metadata, Integer.parseInt(coordinatorId)); } @@ -222,7 +272,27 @@ public void getPendingAckInternalStats(@Suspended final AsyncResponse asyncRespo @PathParam("topic") @Encoded String encodedTopic, @PathParam("subName") String subName, @QueryParam("metadata") @DefaultValue("false") boolean metadata) { - internalGetPendingAckInternalStats(asyncResponse, authoritative, - TopicName.get(TopicDomain.persistent.value(), tenant, namespace, encodedTopic), subName, metadata); + try { + checkTransactionCoordinatorEnabled(); + validateTopicName(tenant, namespace, encodedTopic); + internalGetPendingAckInternalStats(authoritative, subName, metadata) + .thenAccept(stats -> asyncResponse.resume(stats)) + .exceptionally(ex -> { + Throwable cause = FutureUtil.unwrapCompletionException(ex); + log.error("[{}] Failed to get pending ack internal stats {}", clientAppId(), topicName, cause); + if (cause instanceof BrokerServiceException.ServiceUnitNotReadyException) { + asyncResponse.resume(new RestException(SERVICE_UNAVAILABLE, cause)); + } else if (cause instanceof BrokerServiceException.NotAllowedException) { + asyncResponse.resume(new RestException(METHOD_NOT_ALLOWED, cause)); + } else if (cause instanceof BrokerServiceException.SubscriptionNotFoundException) { + asyncResponse.resume(new RestException(NOT_FOUND, cause)); + } else { + asyncResponse.resume(new 
RestException(cause)); + } + return null; + }); + } catch (Exception ex) { + resumeAsyncResponseExceptionally(asyncResponse, ex); + } } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/cache/BundlesQuotas.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/cache/BundlesQuotas.java index 251203eb12de7..88ddaf15314c7 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/cache/BundlesQuotas.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/cache/BundlesQuotas.java @@ -73,7 +73,7 @@ public CompletableFuture getResourceQuota(NamespaceBundle bundle) } public CompletableFuture getResourceQuota(String bundle) { - return resourceQuotaCache.get(RESOURCE_QUOTA_ROOT + "/" + bundle.toString()) + return resourceQuotaCache.get(RESOURCE_QUOTA_ROOT + "/" + bundle) .thenCompose(optResourceQuota -> { if (optResourceQuota.isPresent()) { return CompletableFuture.completedFuture(optResourceQuota.get()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/DelayedDeliveryTracker.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/DelayedDeliveryTracker.java index 2fbd9a51d4ab4..35853d3599b0f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/DelayedDeliveryTracker.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/DelayedDeliveryTracker.java @@ -55,6 +55,11 @@ public interface DelayedDeliveryTracker extends AutoCloseable { */ Set getScheduledMessages(int maxMessages); + /** + * Tells whether the dispatcher should pause any message deliveries, until the DelayedDeliveryTracker has + * more messages available. + */ + boolean shouldPauseAllDeliveries(); /** * Reset tick time use zk policies cache. 
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTracker.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTracker.java index 80ec2185e567b..dca5a0b7ee9c2 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTracker.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTracker.java @@ -33,7 +33,7 @@ @Slf4j public class InMemoryDelayedDeliveryTracker implements DelayedDeliveryTracker, TimerTask { - private final TripleLongPriorityQueue priorityQueue = new TripleLongPriorityQueue(); + protected final TripleLongPriorityQueue priorityQueue = new TripleLongPriorityQueue(); private final PersistentDispatcherMultipleConsumers dispatcher; @@ -41,45 +41,92 @@ public class InMemoryDelayedDeliveryTracker implements DelayedDeliveryTracker, T private final Timer timer; // Current timeout or null if not set - private Timeout timeout; + protected Timeout timeout; // Timestamp at which the timeout is currently set private long currentTimeoutTarget; + // Last time the TimerTask was triggered for this class + private long lastTickRun; + private long tickTimeMillis; private final Clock clock; - InMemoryDelayedDeliveryTracker(PersistentDispatcherMultipleConsumers dispatcher, Timer timer, long tickTimeMillis) { - this(dispatcher, timer, tickTimeMillis, Clock.systemUTC()); + private final boolean isDelayedDeliveryDeliverAtTimeStrict; + + // If we detect that all messages have fixed delay time, such that the delivery is + // always going to be in FIFO order, then we can avoid pulling all the messages in + // tracker. Instead, we use the lookahead for detection and pause the read from + // the cursor if the delays are fixed. 
+ private final long fixedDelayDetectionLookahead; + + // This is the timestamp of the message with the highest delivery time + // If new added messages are lower than this, it means the delivery is requested + // to be out-of-order. It gets reset to 0, once the tracker is emptied. + private long highestDeliveryTimeTracked = 0; + + // Track whether we have seen all messages with fixed delay so far. + private boolean messagesHaveFixedDelay = true; + + InMemoryDelayedDeliveryTracker(PersistentDispatcherMultipleConsumers dispatcher, Timer timer, long tickTimeMillis, + boolean isDelayedDeliveryDeliverAtTimeStrict, + long fixedDelayDetectionLookahead) { + this(dispatcher, timer, tickTimeMillis, Clock.systemUTC(), isDelayedDeliveryDeliverAtTimeStrict, + fixedDelayDetectionLookahead); } InMemoryDelayedDeliveryTracker(PersistentDispatcherMultipleConsumers dispatcher, Timer timer, - long tickTimeMillis, Clock clock) { + long tickTimeMillis, Clock clock, + boolean isDelayedDeliveryDeliverAtTimeStrict, + long fixedDelayDetectionLookahead) { this.dispatcher = dispatcher; this.timer = timer; this.tickTimeMillis = tickTimeMillis; this.clock = clock; + this.isDelayedDeliveryDeliverAtTimeStrict = isDelayedDeliveryDeliverAtTimeStrict; + this.fixedDelayDetectionLookahead = fixedDelayDetectionLookahead; + } + + /** + * When {@link #isDelayedDeliveryDeliverAtTimeStrict} is false, we allow for early delivery by as much as the + * {@link #tickTimeMillis} because it is a slight optimization to let messages skip going back into the delay + * tracker for a brief amount of time when we're already trying to dispatch to the consumer. + * + * When {@link #isDelayedDeliveryDeliverAtTimeStrict} is true, we use the current time to determine when messages + * can be delivered. As a consequence, there are two delays that will affect delivery. The first is the + * {@link #tickTimeMillis} and the second is the {@link Timer}'s granularity. 
+ * + * @return the cutoff time to determine whether a message is ready to deliver to the consumer + */ + private long getCutoffTime() { + return isDelayedDeliveryDeliverAtTimeStrict ? clock.millis() : clock.millis() + tickTimeMillis; } @Override - public boolean addMessage(long ledgerId, long entryId, long deliveryAt) { - long now = clock.millis(); + public boolean addMessage(long ledgerId, long entryId, long deliverAt) { + if (deliverAt < 0 || deliverAt <= getCutoffTime()) { + messagesHaveFixedDelay = false; + return false; + } + if (log.isDebugEnabled()) { log.debug("[{}] Add message {}:{} -- Delivery in {} ms ", dispatcher.getName(), ledgerId, entryId, - deliveryAt - now); - } - if (deliveryAt < (now + tickTimeMillis)) { - // It's already about time to deliver this message. We add the buffer of - // `tickTimeMillis` because messages can be extracted from the tracker - // slightly before the expiration time. We don't want the messages to - // go back into the delay tracker (for a brief amount of time) when we're - // trying to dispatch to the consumer. - return false; + deliverAt - clock.millis()); } - priorityQueue.add(deliveryAt, ledgerId, entryId); + + priorityQueue.add(deliverAt, ledgerId, entryId); updateTimer(); + + // Check that new delivery time comes after the current highest, or at + // least within a single tick time interval of 1 second. + if (deliverAt < (highestDeliveryTimeTracked - tickTimeMillis)) { + messagesHaveFixedDelay = false; + } + + highestDeliveryTimeTracked = Math.max(highestDeliveryTimeTracked, deliverAt); + return true; } @@ -88,11 +135,8 @@ public boolean addMessage(long ledgerId, long entryId, long deliveryAt) { */ @Override public boolean hasMessageAvailable() { - // Avoid the TimerTask run before reach the timeout. 
- long cutOffTime = clock.millis() + tickTimeMillis; - boolean hasMessageAvailable = !priorityQueue.isEmpty() && priorityQueue.peekN1() <= cutOffTime; + boolean hasMessageAvailable = !priorityQueue.isEmpty() && priorityQueue.peekN1() <= getCutoffTime(); if (!hasMessageAvailable) { - // prevent the first delay message later than cutoffTime updateTimer(); } return hasMessageAvailable; @@ -105,11 +149,7 @@ public boolean hasMessageAvailable() { public Set getScheduledMessages(int maxMessages) { int n = maxMessages; Set positions = new TreeSet<>(); - long now = clock.millis(); - // Pick all the messages that will be ready within the tick time period. - // This is to avoid keeping rescheduling the timer for each message at - // very short delay - long cutoffTime = now + tickTimeMillis; + long cutoffTime = getCutoffTime(); while (n > 0 && !priorityQueue.isEmpty()) { long timestamp = priorityQueue.peekN1(); @@ -128,6 +168,13 @@ public Set getScheduledMessages(int maxMessages) { if (log.isDebugEnabled()) { log.debug("[{}] Get scheduled messages - found {}", dispatcher.getName(), positions.size()); } + + if (priorityQueue.isEmpty()) { + // Reset to initial state + highestDeliveryTimeTracked = 0; + messagesHaveFixedDelay = true; + } + updateTimer(); return positions; } @@ -149,6 +196,17 @@ public long getNumberOfDelayedMessages() { return priorityQueue.size(); } + /** + * Update the scheduled timer task such that: + * 1. If there are no delayed messages, return and do not schedule a timer task. + * 2. If the next message in the queue has the same deliverAt time as the timer task, return and leave existing + * timer task in place. + * 3. If the deliverAt time for the next delayed message has already passed (i.e. the delay is negative), return + * without scheduling a timer task since the subscription is backlogged. + * 4. 
Else, schedule a timer task where the delay is the greater of these two: the next message's deliverAt time or + * the last tick time plus the tickTimeMillis (to ensure we do not schedule the task more frequently than the + * tickTimeMillis). + */ private void updateTimer() { if (priorityQueue.isEmpty()) { if (timeout != null) { @@ -169,10 +227,8 @@ private void updateTimer() { timeout.cancel(); } - long delayMillis = timestamp - clock.millis(); - if (log.isDebugEnabled()) { - log.debug("[{}] Start timer in {} millis", dispatcher.getName(), delayMillis); - } + long now = clock.millis(); + long delayMillis = timestamp - now; if (delayMillis < 0) { // There are messages that are already ready to be delivered. If @@ -184,8 +240,18 @@ private void updateTimer() { return; } + // Compute the earliest time that we schedule the timer to run. + long remainingTickDelayMillis = lastTickRun + tickTimeMillis - now; + long calculatedDelayMillis = Math.max(delayMillis, remainingTickDelayMillis); + + if (log.isDebugEnabled()) { + log.debug("[{}] Start timer in {} millis", dispatcher.getName(), calculatedDelayMillis); + } + + // Even though we may delay longer than this timestamp because of the tick delay, we still track the + // current timeout with reference to the next message's timestamp. 
currentTimeoutTarget = timestamp; - timeout = timer.newTimeout(this, delayMillis, TimeUnit.MILLISECONDS); + timeout = timer.newTimeout(this, calculatedDelayMillis, TimeUnit.MILLISECONDS); } @Override @@ -193,22 +259,33 @@ public void run(Timeout timeout) throws Exception { if (log.isDebugEnabled()) { log.debug("[{}] Timer triggered", dispatcher.getName()); } - if (timeout.isCancelled()) { + if (timeout == null || timeout.isCancelled()) { return; } synchronized (dispatcher) { + lastTickRun = clock.millis(); currentTimeoutTarget = -1; - timeout = null; + this.timeout = null; dispatcher.readMoreEntries(); } } @Override public void close() { - priorityQueue.close(); if (timeout != null) { timeout.cancel(); + timeout = null; } + priorityQueue.close(); + } + + @Override + public boolean shouldPauseAllDeliveries() { + // Pause deliveries if we know all delays are fixed within the lookahead window + return fixedDelayDetectionLookahead > 0 + && messagesHaveFixedDelay + && priorityQueue.size() >= fixedDelayDetectionLookahead + && !hasMessageAvailable(); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTrackerFactory.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTrackerFactory.java index b1a9b2633699c..7bf0ca87c40c7 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTrackerFactory.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/delayed/InMemoryDelayedDeliveryTrackerFactory.java @@ -31,16 +31,23 @@ public class InMemoryDelayedDeliveryTrackerFactory implements DelayedDeliveryTra private long tickTimeMillis; + private boolean isDelayedDeliveryDeliverAtTimeStrict; + + private long fixedDelayDetectionLookahead; + @Override public void initialize(ServiceConfiguration config) { this.timer = new HashedWheelTimer(new DefaultThreadFactory("pulsar-delayed-delivery"), config.getDelayedDeliveryTickTimeMillis(), 
TimeUnit.MILLISECONDS); this.tickTimeMillis = config.getDelayedDeliveryTickTimeMillis(); + this.isDelayedDeliveryDeliverAtTimeStrict = config.isDelayedDeliveryDeliverAtTimeStrict(); + this.fixedDelayDetectionLookahead = config.getDelayedDeliveryFixedDelayDetectionLookahead(); } @Override public DelayedDeliveryTracker newTracker(PersistentDispatcherMultipleConsumers dispatcher) { - return new InMemoryDelayedDeliveryTracker(dispatcher, timer, tickTimeMillis); + return new InMemoryDelayedDeliveryTracker(dispatcher, timer, tickTimeMillis, + isDelayedDeliveryDeliverAtTimeStrict, fixedDelayDetectionLookahead); } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/BrokerInterceptorWithClassLoader.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/BrokerInterceptorWithClassLoader.java index e1e23e414d8f9..6b06920a9bf2a 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/BrokerInterceptorWithClassLoader.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/BrokerInterceptorWithClassLoader.java @@ -26,6 +26,7 @@ import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.Entry; +import org.apache.pulsar.broker.ClassLoaderSwitcher; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.service.ServerCnx; import org.apache.pulsar.broker.service.Subscription; @@ -50,39 +51,53 @@ public void beforeSendMessage(Subscription subscription, Entry entry, long[] ackSet, MessageMetadata msgMetadata) { - this.interceptor.beforeSendMessage( - subscription, entry, ackSet, msgMetadata); + try (ClassLoaderSwitcher ignored = new ClassLoaderSwitcher(classLoader)) { + this.interceptor.beforeSendMessage( + subscription, entry, ackSet, msgMetadata); + } } @Override public void onPulsarCommand(BaseCommand command, ServerCnx cnx) throws InterceptException { - this.interceptor.onPulsarCommand(command, cnx); + try 
(ClassLoaderSwitcher ignored = new ClassLoaderSwitcher(classLoader)) { + this.interceptor.onPulsarCommand(command, cnx); + } } @Override public void onConnectionClosed(ServerCnx cnx) { - this.interceptor.onConnectionClosed(cnx); + try (ClassLoaderSwitcher ignored = new ClassLoaderSwitcher(classLoader)) { + this.interceptor.onConnectionClosed(cnx); + } } @Override public void onWebserviceRequest(ServletRequest request) throws IOException, ServletException, InterceptException { - this.interceptor.onWebserviceRequest(request); + try (ClassLoaderSwitcher ignored = new ClassLoaderSwitcher(classLoader)) { + this.interceptor.onWebserviceRequest(request); + } } @Override public void onWebserviceResponse(ServletRequest request, ServletResponse response) throws IOException, ServletException { - this.interceptor.onWebserviceResponse(request, response); + try (ClassLoaderSwitcher ignored = new ClassLoaderSwitcher(classLoader)) { + this.interceptor.onWebserviceResponse(request, response); + } } @Override public void initialize(PulsarService pulsarService) throws Exception { - this.interceptor.initialize(pulsarService); + try (ClassLoaderSwitcher ignored = new ClassLoaderSwitcher(classLoader)) { + this.interceptor.initialize(pulsarService); + } } @Override public void close() { - interceptor.close(); + try (ClassLoaderSwitcher ignored = new ClassLoaderSwitcher(classLoader)) { + interceptor.close(); + } try { classLoader.close(); } catch (IOException e) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/ManagedLedgerInterceptorImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/ManagedLedgerInterceptorImpl.java index bbab84ba1f093..424797fa52a51 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/ManagedLedgerInterceptorImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/intercept/ManagedLedgerInterceptorImpl.java @@ -35,11 +35,8 @@ public class ManagedLedgerInterceptorImpl implements 
ManagedLedgerInterceptor { private static final Logger log = LoggerFactory.getLogger(ManagedLedgerInterceptorImpl.class); private static final String INDEX = "index"; - - private final Set brokerEntryMetadataInterceptors; - public ManagedLedgerInterceptorImpl(Set brokerEntryMetadataInterceptors) { this.brokerEntryMetadataInterceptors = brokerEntryMetadataInterceptors; } @@ -108,6 +105,7 @@ public CompletableFuture onManagedLedgerLastLedgerInitialize(String name, entries.close(); promise.complete(null); } catch (Exception e) { + entries.close(); log.error("[{}] Failed to recover the index generator from the last add confirmed entry.", name, e); promise.completeExceptionally(e); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LoadData.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LoadData.java index a469c5c24ddb9..4243420391993 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LoadData.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/LoadData.java @@ -20,8 +20,11 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; import org.apache.pulsar.broker.BrokerData; import org.apache.pulsar.broker.BundleData; +import org.apache.pulsar.broker.namespace.NamespaceService; +import org.apache.pulsar.common.naming.NamespaceBundle; /** * This class represents all data that could be relevant when making a load management decision. 
@@ -59,6 +62,13 @@ public Map getBundleData() { return bundleData; } + public Map getBundleDataForLoadShedding() { + return bundleData.entrySet().stream() + .filter(e -> !NamespaceService.isSystemServiceNamespace( + NamespaceBundle.getBundleNamespace(e.getKey()))) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + public Map getRecentlyUnloadedBundles() { return recentlyUnloadedBundles; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/ResourceUnit.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/ResourceUnit.java index 1afde4e365713..51becdb7f77cd 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/ResourceUnit.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/ResourceUnit.java @@ -22,9 +22,14 @@ ResourceUnit represents any machine/unit which has resources that broker can use to serve its service units */ public interface ResourceUnit extends Comparable { + + String PROPERTY_KEY_BROKER_ZNODE_NAME = "__advertised_addr"; + String getResourceId(); ResourceDescription getAvailableResource(); boolean canFit(ResourceDescription resourceDescription); + + Object getProperty(String key); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/BundleSplitterTask.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/BundleSplitterTask.java index e81fb506f290e..f910c2fe7729f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/BundleSplitterTask.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/BundleSplitterTask.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.loadbalance.impl; +import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -39,12 +40,16 @@ public class BundleSplitterTask implements BundleSplitStrategy { private static final Logger log = 
LoggerFactory.getLogger(BundleSplitStrategy.class); private final Set bundleCache; + private final Map namespaceBundleCount; + + /** * Construct a BundleSplitterTask. * */ public BundleSplitterTask() { bundleCache = new HashSet<>(); + namespaceBundleCount = new HashMap<>(); } /** @@ -61,19 +66,21 @@ public BundleSplitterTask() { @Override public Set findBundlesToSplit(final LoadData loadData, final PulsarService pulsar) { bundleCache.clear(); + namespaceBundleCount.clear(); final ServiceConfiguration conf = pulsar.getConfiguration(); int maxBundleCount = conf.getLoadBalancerNamespaceMaximumBundles(); long maxBundleTopics = conf.getLoadBalancerNamespaceBundleMaxTopics(); long maxBundleSessions = conf.getLoadBalancerNamespaceBundleMaxSessions(); long maxBundleMsgRate = conf.getLoadBalancerNamespaceBundleMaxMsgRate(); long maxBundleBandwidth = conf.getLoadBalancerNamespaceBundleMaxBandwidthMbytes() * LoadManagerShared.MIBI; + loadData.getBrokerData().forEach((broker, brokerData) -> { LocalBrokerData localData = brokerData.getLocalData(); for (final Map.Entry entry : localData.getLastStats().entrySet()) { final String bundle = entry.getKey(); final NamespaceBundleStats stats = entry.getValue(); - if (stats.topics == 1) { - log.info("namespace bundle {} only have 1 topic", bundle); + if (stats.topics < 2) { + log.info("The count of topics on the bundle {} is less than 2,skip split!", bundle); continue; } double totalMessageRate = 0; @@ -90,8 +97,11 @@ public Set findBundlesToSplit(final LoadData loadData, final PulsarServi try { final int bundleCount = pulsar.getNamespaceService() .getBundleCount(NamespaceName.get(namespace)); - if (bundleCount < maxBundleCount) { + if ((bundleCount + namespaceBundleCount.getOrDefault(namespace, 0)) + < maxBundleCount) { bundleCache.add(bundle); + int bundleNum = namespaceBundleCount.getOrDefault(namespace, 0); + namespaceBundleCount.put(namespace, bundleNum + 1); } else { log.warn( "Could not split namespace bundle {} because namespace 
{} has too many bundles: {}", diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/GenericBrokerHostUsageImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/GenericBrokerHostUsageImpl.java index f55d75a4ce16d..1405e8e3953ac 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/GenericBrokerHostUsageImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/GenericBrokerHostUsageImpl.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.loadbalance.impl; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.sun.management.OperatingSystemMXBean; import java.lang.management.ManagementFactory; import java.util.concurrent.ScheduledExecutorService; @@ -53,9 +54,10 @@ public GenericBrokerHostUsageImpl(int hostUsageCheckIntervalMin, this.totalCpuLimit = getTotalCpuLimit(); // Call now to initialize values before the constructor returns calculateBrokerHostUsage(); - executorService.scheduleAtFixedRate(this::checkCpuLoad, CPU_CHECK_MILLIS, + executorService.scheduleWithFixedDelay(catchingAndLoggingThrowables(this::checkCpuLoad), CPU_CHECK_MILLIS, CPU_CHECK_MILLIS, TimeUnit.MILLISECONDS); - executorService.scheduleAtFixedRate(this::doCalculateBrokerHostUsage, hostUsageCheckIntervalMin, + executorService.scheduleWithFixedDelay(catchingAndLoggingThrowables(this::doCalculateBrokerHostUsage), + hostUsageCheckIntervalMin, hostUsageCheckIntervalMin, TimeUnit.MINUTES); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImpl.java index 8e4e3c57f1de7..95cefd35d6073 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImpl.java +++ 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LinuxBrokerHostUsageImpl.java @@ -18,10 +18,12 @@ */ package org.apache.pulsar.broker.loadbalance.impl; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.base.Charsets; import com.sun.management.OperatingSystemMXBean; import java.io.IOException; import java.lang.management.ManagementFactory; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; @@ -86,7 +88,8 @@ public LinuxBrokerHostUsageImpl(int hostUsageCheckIntervalMin, // Call now to initialize values before the constructor returns calculateBrokerHostUsage(); - executorService.scheduleAtFixedRate(this::calculateBrokerHostUsage, hostUsageCheckIntervalMin, + executorService.scheduleWithFixedDelay(catchingAndLoggingThrowables(this::calculateBrokerHostUsage), + hostUsageCheckIntervalMin, hostUsageCheckIntervalMin, TimeUnit.MINUTES); } @@ -103,9 +106,14 @@ public void calculateBrokerHostUsage() { double totalNicUsageRx = getTotalNicUsageRxKb(nics); double totalCpuLimit = getTotalCpuLimit(); - SystemResourceUsage usage = new SystemResourceUsage(); long now = System.currentTimeMillis(); double elapsedSeconds = (now - lastCollection) / 1000d; + if (elapsedSeconds <= 0) { + log.warn("elapsedSeconds {} is not expected, skip this round of calculateBrokerHostUsage", elapsedSeconds); + return; + } + + SystemResourceUsage usage = new SystemResourceUsage(); double cpuUsage = getTotalCpuUsage(elapsedSeconds); if (lastCollection == 0L) { @@ -219,16 +227,18 @@ public int getNicCount() { } private boolean isPhysicalNic(Path path) { - if (!path.toString().contains("/virtual/")) { - try { - Files.readAllBytes(path.resolve("speed")); - return true; - } catch (Exception e) { - // wireless nics don't report speed, ignore them. 
+ try { + if (path.toRealPath().toString().contains("/virtual/")) { return false; } + // Check the type to make sure it's ethernet (type "1") + String type = new String(Files.readAllBytes(path.resolve("type")), StandardCharsets.UTF_8).trim(); + // wireless NICs don't report speed, ignore them. + return Integer.parseInt(type) == 1; + } catch (Exception e) { + // Read type got error. + return false; } - return false; } private Path getNicSpeedPath(String nic) { @@ -238,16 +248,17 @@ private Path getNicSpeedPath(String nic) { private double getTotalNicLimitKbps(List nics) { // Use the override value as configured. Return the total max speed across all available NICs, converted // from Gbps into Kbps - return overrideBrokerNicSpeedGbps.map(aDouble -> aDouble * nics.size() * 1024 * 1024) - .orElseGet(() -> nics.stream().mapToDouble(s -> { + return overrideBrokerNicSpeedGbps.map(aDouble -> aDouble * nics.size() * 1000 * 1000) + .orElseGet(() -> nics.stream().mapToDouble(nicPath -> { // Nic speed is in Mbits/s, return kbits/s try { - return Double.parseDouble(new String(Files.readAllBytes(getNicSpeedPath(s)))); + return Double.parseDouble(new String(Files.readAllBytes(getNicSpeedPath(nicPath)))); } catch (IOException e) { - log.error("Failed to read speed for nic " + s, e); + log.error(String.format("Failed to read speed for nic %s, maybe you can set broker" + + " config [loadBalancerOverrideBrokerNicSpeedGbps] to override it.", nicPath), e); return 0d; } - }).sum() * 1024); + }).sum() * 1000); } private Path getNicTxPath(String nic) { @@ -266,7 +277,7 @@ private double getTotalNicUsageRxKb(List nics) { log.error("Failed to read rx_bytes for NIC " + s, e); return 0d; } - }).sum() * 8 / 1024; + }).sum() * 8d / 1000; } private double getTotalNicUsageTxKb(List nics) { @@ -277,7 +288,7 @@ private double getTotalNicUsageTxKb(List nics) { log.error("Failed to read tx_bytes for NIC " + s, e); return 0d; } - }).sum() * 8 / 1024; + }).sum() * 8d / 1000; } private static long 
readLongFromFile(String path) throws IOException { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java index 74165c78dfa6e..2737e97df1476 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java @@ -75,9 +75,6 @@ protected Set initialValue() throws Exception { } }; - // update LoadReport at most every 5 seconds - public static final long LOAD_REPORT_UPDATE_MINIMUM_INTERVAL = TimeUnit.SECONDS.toMillis(5); - private static final String DEFAULT_DOMAIN = "default"; // Don't allow construction: static method namespace only. @@ -190,7 +187,9 @@ public static void fillNamespaceToBundlesMap(final Set bundles, bundles.forEach(bundleName -> { final String namespaceName = getNamespaceNameFromBundleName(bundleName); final String bundleRange = getBundleRangeFromBundleName(bundleName); - target.computeIfAbsent(namespaceName, k -> new ConcurrentOpenHashSet<>()).add(bundleRange); + target.computeIfAbsent(namespaceName, + k -> ConcurrentOpenHashSet.newBuilder().build()) + .add(bundleRange); }); } @@ -263,8 +262,12 @@ public static void removeMostServicingBrokersForNamespace( for (final String broker : candidates) { int bundles = (int) brokerToNamespaceToBundleRange - .computeIfAbsent(broker, k -> new ConcurrentOpenHashMap<>()) - .computeIfAbsent(namespaceName, k -> new ConcurrentOpenHashSet<>()).size(); + .computeIfAbsent(broker, + k -> ConcurrentOpenHashMap.>newBuilder().build()) + .computeIfAbsent(namespaceName, + k -> ConcurrentOpenHashSet.newBuilder().build()) + .size(); leastBundles = Math.min(leastBundles, bundles); if (leastBundles == 0) { break; @@ -276,8 +279,12 @@ public static void removeMostServicingBrokersForNamespace( final int finalLeastBundles = leastBundles; 
candidates.removeIf( - broker -> brokerToNamespaceToBundleRange.computeIfAbsent(broker, k -> new ConcurrentOpenHashMap<>()) - .computeIfAbsent(namespaceName, k -> new ConcurrentOpenHashSet<>()).size() > finalLeastBundles); + broker -> brokerToNamespaceToBundleRange.computeIfAbsent(broker, + k -> ConcurrentOpenHashMap.>newBuilder().build()) + .computeIfAbsent(namespaceName, + k -> ConcurrentOpenHashSet.newBuilder().build()) + .size() > finalLeastBundles); } /** diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java index 7668c116a3954..d821e3399446c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java @@ -201,7 +201,10 @@ public class ModularLoadManagerImpl implements ModularLoadManager { */ public ModularLoadManagerImpl() { brokerCandidateCache = new HashSet<>(); - brokerToNamespaceToBundleRange = new ConcurrentOpenHashMap<>(); + brokerToNamespaceToBundleRange = + ConcurrentOpenHashMap.>>newBuilder() + .build(); defaultStats = new NamespaceBundleStats(); filterPipeline = new ArrayList<>(); loadData = new LoadData(); @@ -424,8 +427,9 @@ private boolean needBrokerDataUpdate() { long timeSinceLastReportWrittenToStore = System.currentTimeMillis() - localData.getLastUpdate(); if (timeSinceLastReportWrittenToStore > updateMaxIntervalMillis) { log.info("Writing local data to metadata store because time since last" - + " update exceeded threshold of {} minutes", - conf.getLoadBalancerReportUpdateMaxIntervalMinutes()); + + " update exceeded threshold of {} minutes. ResourceUsage:[{}]", + conf.getLoadBalancerReportUpdateMaxIntervalMinutes(), + localData.printResourceUsage()); // Always update after surpassing the maximum interval. 
return true; } @@ -439,9 +443,10 @@ private boolean needBrokerDataUpdate() { percentChange(lastData.getNumBundles(), localData.getNumBundles())))); if (maxChange > conf.getLoadBalancerReportUpdateThresholdPercentage()) { log.info("Writing local data to metadata store because maximum change {}% exceeded threshold {}%; " - + "time since last report written is {} seconds", maxChange, + + "time since last report written is {} seconds. ResourceUsage:[{}]", maxChange, conf.getLoadBalancerReportUpdateThresholdPercentage(), - timeSinceLastReportWrittenToStore / 1000.0); + timeSinceLastReportWrittenToStore / 1000.0, + localData.printResourceUsage()); return true; } return false; @@ -547,7 +552,10 @@ private void updateBundleData() { brokerData.getTimeAverageData().reset(statsMap.keySet(), bundleData, defaultStats); final ConcurrentOpenHashMap> namespaceToBundleRange = brokerToNamespaceToBundleRange - .computeIfAbsent(broker, k -> new ConcurrentOpenHashMap<>()); + .computeIfAbsent(broker, k -> + ConcurrentOpenHashMap.>newBuilder() + .build()); synchronized (namespaceToBundleRange) { namespaceToBundleRange.clear(); LoadManagerShared.fillNamespaceToBundlesMap(statsMap.keySet(), namespaceToBundleRange); @@ -607,7 +615,8 @@ public synchronized void doLoadShedding() { return; } - log.info("[Overload shedder] Unloading bundle: {} from broker {}", bundle, broker); + log.info("[{}] Unloading bundle: {} from broker {}", + strategy.getClass().getSimpleName(), bundle, broker); try { pulsar.getAdminClient().namespaces().unloadNamespaceBundle(namespaceName, bundleRange); loadData.getRecentlyUnloadedBundles().put(bundle, System.currentTimeMillis()); @@ -682,6 +691,7 @@ public void checkNamespaceBundleSplit() { synchronized (bundleSplitStrategy) { final Set bundlesToBeSplit = bundleSplitStrategy.findBundlesToSplit(loadData, pulsar); NamespaceBundleFactory namespaceBundleFactory = pulsar.getNamespaceService().getNamespaceBundleFactory(); + int splitCount = 0; for (String bundleName : 
bundlesToBeSplit) { try { final String namespaceName = LoadManagerShared.getNamespaceNameFromBundleName(bundleName); @@ -703,13 +713,14 @@ public void checkNamespaceBundleSplit() { pulsar.getAdminClient().namespaces().splitNamespaceBundle(namespaceName, bundleRange, unloadSplitBundles, null); + splitCount++; log.info("Successfully split namespace bundle {}", bundleName); } catch (Exception e) { log.error("Failed to split namespace bundle {}", bundleName, e); } } - updateBundleSplitMetrics(bundlesToBeSplit); + updateBundleSplitMetrics(splitCount); } } @@ -717,10 +728,10 @@ public void checkNamespaceBundleSplit() { /** * As leader broker, update bundle split metrics. * - * @param bundlesToBeSplit + * @param bundlesSplit the number of bundles splits */ - private void updateBundleSplitMetrics(Set bundlesToBeSplit) { - bundleSplitCount += bundlesToBeSplit.size(); + private void updateBundleSplitMetrics(int bundlesSplit) { + bundleSplitCount += bundlesSplit; List metrics = Lists.newArrayList(); Map dimensions = new HashMap<>(); @@ -818,7 +829,11 @@ public Optional selectBrokerForAssignment(final ServiceUnitId serviceUni LoadManagerShared.applyNamespacePolicies(serviceUnit, policies, brokerCandidateCache, getAvailableBrokers(), brokerTopicLoadingPredicate); - broker = placementStrategy.selectBroker(brokerCandidateCache, data, loadData, conf); + Optional brokerTmp = + placementStrategy.selectBroker(brokerCandidateCache, data, loadData, conf); + if (brokerTmp.isPresent()) { + broker = brokerTmp; + } } // Add new bundle to preallocated. 
@@ -829,9 +844,13 @@ public Optional selectBrokerForAssignment(final ServiceUnitId serviceUni final String bundleRange = LoadManagerShared.getBundleRangeFromBundleName(bundle); final ConcurrentOpenHashMap> namespaceToBundleRange = brokerToNamespaceToBundleRange - .computeIfAbsent(broker.get(), k -> new ConcurrentOpenHashMap<>()); + .computeIfAbsent(broker.get(), + k -> ConcurrentOpenHashMap.>newBuilder() + .build()); synchronized (namespaceToBundleRange) { - namespaceToBundleRange.computeIfAbsent(namespaceName, k -> new ConcurrentOpenHashSet<>()) + namespaceToBundleRange.computeIfAbsent(namespaceName, + k -> ConcurrentOpenHashSet.newBuilder().build()) .add(bundleRange); } return broker; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerWrapper.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerWrapper.java index a138fe397b020..ac55d679219f8 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerWrapper.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerWrapper.java @@ -18,7 +18,10 @@ */ package org.apache.pulsar.broker.loadbalance.impl; +import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.Set; import org.apache.pulsar.broker.PulsarServerException; @@ -64,8 +67,14 @@ public LoadManagerReport generateLoadReport() { @Override public Optional getLeastLoaded(final ServiceUnitId serviceUnit) { Optional leastLoadedBroker = loadManager.selectBrokerForAssignment(serviceUnit); - return leastLoadedBroker.map(s -> new SimpleResourceUnit(getBrokerWebServiceUrl(s), - new PulsarResourceDescription())); + return leastLoadedBroker.map(s -> { + String webServiceUrl = getBrokerWebServiceUrl(s); + String brokerZnodeName = getBrokerZnodeName(s, webServiceUrl); + Map map = new HashMap<>(); + 
map.put(ResourceUnit.PROPERTY_KEY_BROKER_ZNODE_NAME, brokerZnodeName); + return new SimpleResourceUnit(webServiceUrl, + new PulsarResourceDescription(), Collections.unmodifiableMap(map)); + }); } private String getBrokerWebServiceUrl(String broker) { @@ -77,6 +86,11 @@ private String getBrokerWebServiceUrl(String broker) { return String.format("http://%s", broker); } + private String getBrokerZnodeName(String broker, String webServiceUrl) { + String scheme = webServiceUrl.substring(0, webServiceUrl.indexOf("://")); + return String.format("%s://%s", scheme, broker); + } + @Override public List getLoadBalancingMetrics() { return loadManager.getLoadBalancingMetrics(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/OverloadShedder.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/OverloadShedder.java index 3f33fa353c2ab..985ed6fd5f81e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/OverloadShedder.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/OverloadShedder.java @@ -18,8 +18,6 @@ */ package org.apache.pulsar.broker.loadbalance.impl; -import static org.apache.pulsar.broker.namespace.NamespaceService.HEARTBEAT_NAMESPACE_PATTERN; -import static org.apache.pulsar.broker.namespace.NamespaceService.HEARTBEAT_NAMESPACE_PATTERN_V2; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Multimap; import java.util.Map; @@ -102,10 +100,8 @@ public Multimap findBundlesForUnloading(final LoadData loadData, // Sort bundles by throughput, then pick the biggest N which combined // make up for at least the minimum throughput to offload - loadData.getBundleData().entrySet().stream() - .filter(e -> !HEARTBEAT_NAMESPACE_PATTERN.matcher(e.getKey()).matches() - && !HEARTBEAT_NAMESPACE_PATTERN_V2.matcher(e.getKey()).matches() - && localData.getBundles().contains(e.getKey())) + 
loadData.getBundleDataForLoadShedding().entrySet().stream() + .filter(e -> localData.getBundles().contains(e.getKey())) .map((e) -> { // Map to throughput value // Consider short-term byte rate to address system resource burden diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java index e1829e68aed0e..ed766b1b93004 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.broker.loadbalance.impl; -import static org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared.LOAD_REPORT_UPDATE_MINIMUM_INTERVAL; import com.fasterxml.jackson.core.type.TypeReference; import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.CacheBuilder; @@ -200,7 +199,10 @@ public SimpleLoadManagerImpl() { bundleLossesCache = new HashSet<>(); brokerCandidateCache = new HashSet<>(); availableBrokersCache = new HashSet<>(); - brokerToNamespaceToBundleRange = new ConcurrentOpenHashMap<>(); + brokerToNamespaceToBundleRange = + ConcurrentOpenHashMap.>>newBuilder() + .build(); this.brokerTopicLoadingPredicate = new BrokerTopicLoadingPredicate() { @Override public boolean isEnablePersistentTopics(String brokerUrl) { @@ -833,8 +835,12 @@ private synchronized ResourceUnit findBrokerForPlacement(Multimap new ConcurrentOpenHashMap<>()) - .computeIfAbsent(namespaceName, k -> new ConcurrentOpenHashSet<>()).add(bundleRange); + k -> ConcurrentOpenHashMap.>newBuilder() + .build()) + .computeIfAbsent(namespaceName, k -> + ConcurrentOpenHashSet.newBuilder().build()) + .add(bundleRange); ranking.addPreAllocatedServiceUnit(serviceUnitId, quota); resourceUnitRankings.put(selectedRU, ranking); } @@ -1134,10 +1140,12 @@ public void 
setLoadReportForceUpdateFlag() { public void writeLoadReportOnZookeeper() throws Exception { // update average JVM heap usage to average value of the last 120 seconds long realtimeJvmHeapUsage = getRealtimeJvmHeapUsageMBytes(); + int minInterval = pulsar.getConfiguration().getLoadBalancerReportUpdateMinIntervalMillis(); if (this.avgJvmHeapUsageMBytes <= 0) { this.avgJvmHeapUsageMBytes = realtimeJvmHeapUsage; } else { - long weight = Math.max(1, TimeUnit.SECONDS.toMillis(120) / LOAD_REPORT_UPDATE_MINIMUM_INTERVAL); + + long weight = Math.max(1, TimeUnit.SECONDS.toMillis(120) / minInterval); this.avgJvmHeapUsageMBytes = ((weight - 1) * this.avgJvmHeapUsageMBytes + realtimeJvmHeapUsage) / weight; } @@ -1156,7 +1164,7 @@ public void writeLoadReportOnZookeeper() throws Exception { int maxUpdateIntervalInMinutes = pulsar.getConfiguration().getLoadBalancerReportUpdateMaxIntervalMinutes(); if (timeElapsedSinceLastReport > TimeUnit.MINUTES.toMillis(maxUpdateIntervalInMinutes)) { needUpdate = true; - } else if (timeElapsedSinceLastReport > LOAD_REPORT_UPDATE_MINIMUM_INTERVAL) { + } else if (timeElapsedSinceLastReport > minInterval) { // check number of bundles assigned, comparing with last LoadReport long oldBundleCount = lastLoadReport.getNumBundles(); long newBundleCount = pulsar.getBrokerService().getNumberOfNamespaceBundles(); @@ -1225,7 +1233,7 @@ public void writeLoadReportOnZookeeper() throws Exception { */ private boolean isLoadReportGenerationIntervalPassed() { long timeSinceLastGenMillis = System.currentTimeMillis() - lastLoadReport.getTimestamp(); - return timeSinceLastGenMillis > LOAD_REPORT_UPDATE_MINIMUM_INTERVAL; + return timeSinceLastGenMillis > pulsar.getConfiguration().getLoadBalancerReportUpdateMinIntervalMillis(); } // todo: changeme: this can be optimized, we don't have to iterate through everytime @@ -1252,7 +1260,10 @@ private synchronized void updateBrokerToNamespaceToBundle() { final Set preallocatedBundles = 
resourceUnitRankings.get(resourceUnit).getPreAllocatedBundles(); final ConcurrentOpenHashMap> namespaceToBundleRange = brokerToNamespaceToBundleRange - .computeIfAbsent(broker.replace("http://", ""), k -> new ConcurrentOpenHashMap<>()); + .computeIfAbsent(broker.replace("http://", ""), + k -> ConcurrentOpenHashMap.>newBuilder() + .build()); namespaceToBundleRange.clear(); LoadManagerShared.fillNamespaceToBundlesMap(loadedBundles, namespaceToBundleRange); LoadManagerShared.fillNamespaceToBundlesMap(preallocatedBundles, namespaceToBundleRange); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleResourceUnit.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleResourceUnit.java index 62f9b3e94a588..863a75dff4235 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleResourceUnit.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleResourceUnit.java @@ -19,19 +19,32 @@ package org.apache.pulsar.broker.loadbalance.impl; import com.google.common.base.MoreObjects; +import java.util.Collections; +import java.util.Map; import org.apache.pulsar.broker.loadbalance.ResourceDescription; import org.apache.pulsar.broker.loadbalance.ResourceUnit; public class SimpleResourceUnit implements ResourceUnit { - private String resourceId; - private ResourceDescription resourceDescription; + private final String resourceId; + private final ResourceDescription resourceDescription; + + private final Map properties; public SimpleResourceUnit(String resourceId, ResourceDescription resourceDescription) { this.resourceId = resourceId; this.resourceDescription = resourceDescription; + this.properties = Collections.emptyMap(); + } + + public SimpleResourceUnit(String resourceId, ResourceDescription resourceDescription, + Map properties) { + this.resourceId = resourceId; + this.resourceDescription = resourceDescription; + this.properties = properties == null ? 
Collections.emptyMap() : properties; } + @Override public String getResourceId() { // TODO Auto-generated method stub @@ -50,6 +63,11 @@ public boolean canFit(ResourceDescription resourceDescription) { return this.resourceDescription.compareTo(resourceDescription) > 0; } + @Override + public Object getProperty(String key) { + return properties.get(key); + } + @Override public int compareTo(ResourceUnit o) { return resourceId.compareTo(o.getResourceId()); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ThresholdShedder.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ThresholdShedder.java index 9c89be92c4cc5..44e61de0b97cc 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ThresholdShedder.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ThresholdShedder.java @@ -18,8 +18,6 @@ */ package org.apache.pulsar.broker.loadbalance.impl; -import static org.apache.pulsar.broker.namespace.NamespaceService.HEARTBEAT_NAMESPACE_PATTERN; -import static org.apache.pulsar.broker.namespace.NamespaceService.HEARTBEAT_NAMESPACE_PATTERN_V2; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Multimap; import java.util.HashMap; @@ -51,23 +49,41 @@ */ public class ThresholdShedder implements LoadSheddingStrategy { private static final Logger log = LoggerFactory.getLogger(ThresholdShedder.class); - private final Multimap selectedBundlesCache = ArrayListMultimap.create(); - private static final double ADDITIONAL_THRESHOLD_PERCENT_MARGIN = 0.05; - private static final double MB = 1024 * 1024; + private static final long LOAD_LOG_SAMPLE_DELAY_IN_SEC = 5 * 60; // 5 mins private final Map brokerAvgResourceUsage = new HashMap<>(); + private long lastSampledLoadLogTS = 0; + + + private static int toPercentage(double usage) { + return (int) (usage * 100); + } + + private boolean canSampleLog() { + long now = System.currentTimeMillis() / 
1000; + boolean sampleLog = now - lastSampledLoadLogTS >= LOAD_LOG_SAMPLE_DELAY_IN_SEC; + if (sampleLog) { + lastSampledLoadLogTS = now; + } + return sampleLog; + } @Override public Multimap findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) { selectedBundlesCache.clear(); + boolean sampleLog = canSampleLog(); final double threshold = conf.getLoadBalancerBrokerThresholdShedderPercentage() / 100.0; final Map recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles(); final double minThroughputThreshold = conf.getLoadBalancerBundleUnloadMinThroughputThreshold() * MB; - final double avgUsage = getBrokerAvgUsage(loadData, conf.getLoadBalancerHistoryResourcePercentage(), conf); + final double avgUsage = getBrokerAvgUsage( + loadData, conf.getLoadBalancerHistoryResourcePercentage(), conf, sampleLog); + if (sampleLog) { + log.info("brokers' resource avgUsage:{}%", toPercentage(avgUsage)); + } if (avgUsage == 0) { log.warn("average max resource usage is 0"); @@ -79,8 +95,9 @@ public Multimap findBundlesForUnloading(final LoadData loadData, final double currentUsage = brokerAvgResourceUsage.getOrDefault(broker, 0.0); if (currentUsage < avgUsage + threshold) { - if (log.isDebugEnabled()) { - log.debug("[{}] broker is not overloaded, ignoring at this point", broker); + if (sampleLog) { + log.info("[{}] broker is not overloaded, ignoring at this point, currentUsage:{}%", + broker, toPercentage(currentUsage)); } return; } @@ -91,14 +108,13 @@ public Multimap findBundlesForUnloading(final LoadData loadData, double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload; if (minimumThroughputToOffload < minThroughputThreshold) { - if (log.isDebugEnabled()) { + if (sampleLog) { log.info("[{}] broker is planning to shed throughput {} MByte/s less than " + "minimumThroughputThreshold {} MByte/s, skipping bundle unload.", broker, minimumThroughputToOffload / MB, minThroughputThreshold / MB); } return; } - log.info( 
"Attempting to shed load on {}, which has max resource usage above avgUsage and threshold {}%" + " > {}% + {}% -- Offloading at least {} MByte/s of traffic, left throughput {} MByte/s", @@ -109,9 +125,7 @@ public Multimap findBundlesForUnloading(final LoadData loadData, MutableBoolean atLeastOneBundleSelected = new MutableBoolean(false); if (localData.getBundles().size() > 1) { - loadData.getBundleData().entrySet().stream() - .filter(e -> !HEARTBEAT_NAMESPACE_PATTERN.matcher(e.getKey()).matches() - && !HEARTBEAT_NAMESPACE_PATTERN_V2.matcher(e.getKey()).matches()) + loadData.getBundleDataForLoadShedding().entrySet().stream() .map((e) -> { String bundle = e.getKey(); BundleData bundleData = e.getValue(); @@ -146,32 +160,70 @@ public Multimap findBundlesForUnloading(final LoadData loadData, } private double getBrokerAvgUsage(final LoadData loadData, final double historyPercentage, - final ServiceConfiguration conf) { + final ServiceConfiguration conf, boolean sampleLog) { double totalUsage = 0.0; int totalBrokers = 0; for (Map.Entry entry : loadData.getBrokerData().entrySet()) { LocalBrokerData localBrokerData = entry.getValue().getLocalData(); String broker = entry.getKey(); - updateAvgResourceUsage(broker, localBrokerData, historyPercentage, conf); - totalUsage += brokerAvgResourceUsage.getOrDefault(broker, 0.0); + totalUsage += updateAvgResourceUsage(broker, localBrokerData, historyPercentage, conf, sampleLog); totalBrokers++; } return totalBrokers > 0 ? 
totalUsage / totalBrokers : 0; } - private void updateAvgResourceUsage(String broker, LocalBrokerData localBrokerData, final double historyPercentage, - final ServiceConfiguration conf) { - double historyUsage = - brokerAvgResourceUsage.getOrDefault(broker, 0.0); - historyUsage = historyUsage * historyPercentage - + (1 - historyPercentage) * localBrokerData.getMaxResourceUsageWithWeight( + private double updateAvgResourceUsage(String broker, LocalBrokerData localBrokerData, + final double historyPercentage, final ServiceConfiguration conf, + boolean sampleLog) { + Double historyUsage = + brokerAvgResourceUsage.get(broker); + double resourceUsage = localBrokerData.getMaxResourceUsageWithWeight( conf.getLoadBalancerCPUResourceWeight(), conf.getLoadBalancerMemoryResourceWeight(), conf.getLoadBalancerDirectMemoryResourceWeight(), conf.getLoadBalancerBandwithInResourceWeight(), conf.getLoadBalancerBandwithOutResourceWeight()); + + if (sampleLog) { + log.info("{} broker load: historyUsage={}%, resourceUsage={}%", + broker, + historyUsage == null ? 0 : toPercentage(historyUsage), + toPercentage(resourceUsage)); + } + + // wrap if resourceUsage is bigger than 1.0 + if (resourceUsage > 1.0) { + log.error("{} broker resourceUsage is bigger than 100%. " + + "Some of the resource limits are mis-configured. " + + "Try to disable the error resource signals by setting their weights to zero " + + "or fix the resource limit configurations. 
" + + "Ref:https://pulsar.apache.org/docs/administration-load-balance/#thresholdshedder " + + "ResourceUsage:[{}], " + + "CPUResourceWeight:{}, MemoryResourceWeight:{}, DirectMemoryResourceWeight:{}, " + + "BandwithInResourceWeight:{}, BandwithOutResourceWeight:{}", + broker, + localBrokerData.printResourceUsage(), + conf.getLoadBalancerCPUResourceWeight(), + conf.getLoadBalancerMemoryResourceWeight(), + conf.getLoadBalancerDirectMemoryResourceWeight(), + conf.getLoadBalancerBandwithInResourceWeight(), + conf.getLoadBalancerBandwithOutResourceWeight()); + + resourceUsage = localBrokerData.getMaxResourceUsageWithWeightWithinLimit( + conf.getLoadBalancerCPUResourceWeight(), + conf.getLoadBalancerMemoryResourceWeight(), conf.getLoadBalancerDirectMemoryResourceWeight(), + conf.getLoadBalancerBandwithInResourceWeight(), + conf.getLoadBalancerBandwithOutResourceWeight()); + + log.warn("{} broker recomputed max resourceUsage={}%. Skipped usage signals bigger than 100%", + broker, toPercentage(resourceUsage)); + } + historyUsage = historyUsage == null + ? 
resourceUsage : historyUsage * historyPercentage + (1 - historyPercentage) * resourceUsage; + brokerAvgResourceUsage.put(broker, historyUsage); + return historyUsage; } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/lookup/TopicLookupBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/lookup/TopicLookupBase.java index dab1b293e9008..967059c07184b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/lookup/TopicLookupBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/lookup/TopicLookupBase.java @@ -48,6 +48,7 @@ import org.apache.pulsar.common.policies.data.NamespaceOperation; import org.apache.pulsar.common.policies.data.TopicOperation; import org.apache.pulsar.common.util.Codec; +import org.apache.pulsar.common.util.FutureUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -219,23 +220,14 @@ public static CompletableFuture lookupTopicAsync(PulsarService pulsarSe cluster); } validationFuture.complete(newLookupResponse(differentClusterData.getBrokerServiceUrl(), - differentClusterData.getBrokerServiceUrlTls(), true, LookupType.Redirect, requestId, false)); + differentClusterData.getBrokerServiceUrlTls(), true, LookupType.Redirect, + requestId, false)); } else { // (2) authorize client - try { - checkAuthorization(pulsarService, topicName, clientAppId, authenticationData); - } catch (RestException authException) { - log.warn("Failed to authorized {} on cluster {}", clientAppId, topicName.toString()); - validationFuture.complete(newLookupErrorResponse(ServerError.AuthorizationError, - authException.getMessage(), requestId)); - return; - } catch (Exception e) { - log.warn("Unknown error while authorizing {} on cluster {}", clientAppId, topicName.toString()); - validationFuture.completeExceptionally(e); - return; - } - // (3) validate global namespace - checkLocalOrGetPeerReplicationCluster(pulsarService, topicName.getNamespaceObject()) + checkAuthorizationAsync(pulsarService, topicName, 
clientAppId, authenticationData).thenRun(() -> { + // (3) validate global namespace + checkLocalOrGetPeerReplicationCluster(pulsarService, + topicName.getNamespaceObject()) .thenAccept(peerClusterData -> { if (peerClusterData == null) { // (4) all validation passed: initiate lookup @@ -247,21 +239,36 @@ public static CompletableFuture lookupTopicAsync(PulsarService pulsarSe if (StringUtils.isBlank(peerClusterData.getBrokerServiceUrl()) && StringUtils.isBlank(peerClusterData.getBrokerServiceUrlTls())) { validationFuture.complete(newLookupErrorResponse(ServerError.MetadataError, - "Redirected cluster's brokerService url is not configured", requestId)); + "Redirected cluster's brokerService url is not configured", + requestId)); return; } validationFuture.complete(newLookupResponse(peerClusterData.getBrokerServiceUrl(), - peerClusterData.getBrokerServiceUrlTls(), true, LookupType.Redirect, requestId, + peerClusterData.getBrokerServiceUrlTls(), true, LookupType.Redirect, + requestId, false)); - }).exceptionally(ex -> { - validationFuture.complete( - newLookupErrorResponse(ServerError.MetadataError, ex.getMessage(), requestId)); - return null; - }); + validationFuture.complete( + newLookupErrorResponse(ServerError.MetadataError, + FutureUtil.unwrapCompletionException(ex).getMessage(), requestId)); + return null; + }); + }) + .exceptionally(e -> { + Throwable throwable = FutureUtil.unwrapCompletionException(e); + if (throwable instanceof RestException) { + log.warn("Failed to authorized {} on cluster {}", clientAppId, topicName); + validationFuture.complete(newLookupErrorResponse(ServerError.AuthorizationError, + throwable.getMessage(), requestId)); + } else { + log.warn("Unknown error while authorizing {} on cluster {}", clientAppId, topicName); + validationFuture.completeExceptionally(throwable); + } + return null; + }); } }).exceptionally(ex -> { - validationFuture.completeExceptionally(ex); + 
validationFuture.completeExceptionally(FutureUtil.unwrapCompletionException(ex)); return null; }); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java index adcb504c699a5..fa4cd16ee111f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java @@ -46,6 +46,7 @@ import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.ListUtils; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; @@ -55,6 +56,7 @@ import org.apache.pulsar.broker.loadbalance.ResourceUnit; import org.apache.pulsar.broker.lookup.LookupResult; import org.apache.pulsar.broker.service.BrokerServiceException.ServiceUnitNotReadyException; +import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.nonpersistent.NonPersistentTopic; import org.apache.pulsar.broker.stats.prometheus.metrics.Summary; import org.apache.pulsar.broker.web.PulsarWebResource; @@ -65,6 +67,7 @@ import org.apache.pulsar.client.impl.ClientBuilderImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import org.apache.pulsar.client.internal.PropertiesUtils; import org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespace.Mode; import org.apache.pulsar.common.lookup.data.LookupData; import org.apache.pulsar.common.naming.NamespaceBundle; @@ -165,7 +168,8 @@ public NamespaceService(PulsarService pulsar) { this.loadManager = pulsar.getLoadManager(); this.bundleFactory = new NamespaceBundleFactory(pulsar, Hashing.crc32()); this.ownershipCache = new 
OwnershipCache(pulsar, bundleFactory, this); - this.namespaceClients = new ConcurrentOpenHashMap<>(); + this.namespaceClients = + ConcurrentOpenHashMap.newBuilder().build(); this.bundleOwnershipListeners = new CopyOnWriteArrayList<>(); this.localBrokerDataCache = pulsar.getLocalMetadataStore().getMetadataCache(LocalBrokerData.class); this.localPoliciesCache = pulsar.getLocalMetadataStore().getMetadataCache(LocalPolicies.class); @@ -355,9 +359,15 @@ public boolean registerNamespace(NamespaceName nsname, boolean ensureOwned) thro } private final ConcurrentOpenHashMap>> - findingBundlesAuthoritative = new ConcurrentOpenHashMap<>(); + findingBundlesAuthoritative = + ConcurrentOpenHashMap.>>newBuilder() + .build(); private final ConcurrentOpenHashMap>> - findingBundlesNotAuthoritative = new ConcurrentOpenHashMap<>(); + findingBundlesNotAuthoritative = + ConcurrentOpenHashMap.>>newBuilder() + .build(); /** * Main internal method to lookup and setup ownership of service unit to a broker. @@ -448,12 +458,16 @@ private void searchForCandidateBroker(NamespaceBundle bundle, return; } String candidateBroker = null; + String candidateBrokerAdvertisedAddr = null; LeaderElectionService les = pulsar.getLeaderElectionService(); if (les == null) { // The leader election service was not initialized yet. This can happen because the broker service is // initialized first and it might start receiving lookup requests before the leader election service is // fully initialized. + LOG.warn("Leader election service isn't initialized yet. " + + "Returning empty result to lookup. 
NamespaceBundle[{}]", + bundle); lookupFuture.complete(Optional.empty()); return; } @@ -480,23 +494,46 @@ private void searchForCandidateBroker(NamespaceBundle bundle, if (options.isAuthoritative()) { // leader broker already assigned the current broker as owner candidateBroker = pulsar.getSafeWebServiceAddress(); - } else if (!this.loadManager.get().isCentralized() - || pulsar.getLeaderElectionService().isLeader() - || !currentLeader.isPresent() - + } else { + LoadManager loadManager = this.loadManager.get(); + boolean makeLoadManagerDecisionOnThisBroker = !loadManager.isCentralized() || les.isLeader(); + if (!makeLoadManagerDecisionOnThisBroker) { // If leader is not active, fallback to pick the least loaded from current broker loadmanager - || !isBrokerActive(currentLeader.get().getServiceUrl()) - ) { - Optional availableBroker = getLeastLoadedFromLoadManager(bundle); - if (!availableBroker.isPresent()) { - lookupFuture.complete(Optional.empty()); - return; + boolean leaderBrokerActive = currentLeader.isPresent() + && isBrokerActive(currentLeader.get().getServiceUrl()); + if (!leaderBrokerActive) { + makeLoadManagerDecisionOnThisBroker = true; + if (!currentLeader.isPresent()) { + LOG.warn( + "The information about the current leader broker wasn't available. " + + "Handling load manager decisions in a decentralized way. " + + "NamespaceBundle[{}]", + bundle); + } else { + LOG.warn( + "The current leader broker {} isn't active. " + + "Handling load manager decisions in a decentralized way. " + + "NamespaceBundle[{}]", + currentLeader.get(), bundle); + } + } + } + if (makeLoadManagerDecisionOnThisBroker) { + Optional> availableBroker = getLeastLoadedFromLoadManager(bundle); + if (!availableBroker.isPresent()) { + LOG.warn("Load manager didn't return any available broker. " + + "Returning empty result to lookup. 
NamespaceBundle[{}]", + bundle); + lookupFuture.complete(Optional.empty()); + return; + } + candidateBroker = availableBroker.get().getLeft(); + candidateBrokerAdvertisedAddr = availableBroker.get().getRight(); + authoritativeRedirect = true; + } else { + // forward to leader broker to make assignment + candidateBroker = currentLeader.get().getServiceUrl(); } - candidateBroker = availableBroker.get(); - authoritativeRedirect = true; - } else { - // forward to leader broker to make assignment - candidateBroker = currentLeader.get().getServiceUrl(); } } } catch (Exception e) { @@ -562,7 +599,8 @@ private void searchForCandidateBroker(NamespaceBundle bundle, } // Now setting the redirect url - createLookupResult(candidateBroker, authoritativeRedirect, options.getAdvertisedListenerName()) + createLookupResult(candidateBrokerAdvertisedAddr == null ? candidateBroker + : candidateBrokerAdvertisedAddr, authoritativeRedirect, options.getAdvertisedListenerName()) .thenAccept(lookupResult -> lookupFuture.complete(Optional.of(lookupResult))) .exceptionally(ex -> { lookupFuture.completeExceptionally(ex); @@ -577,19 +615,16 @@ private void searchForCandidateBroker(NamespaceBundle bundle, } protected CompletableFuture createLookupResult(String candidateBroker, boolean authoritativeRedirect, - final String advertisedListenerName) - throws Exception { + final String advertisedListenerName) { CompletableFuture lookupFuture = new CompletableFuture<>(); try { - checkArgument(StringUtils.isNotBlank(candidateBroker), "Lookup broker can't be null " + candidateBroker); - URI uri = new URI(candidateBroker); - String path = String.format("%s/%s:%s", LoadManager.LOADBALANCE_BROKERS_ROOT, uri.getHost(), - uri.getPort()); + checkArgument(StringUtils.isNotBlank(candidateBroker), "Lookup broker can't be null %s", candidateBroker); + String path = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + parseHostAndPort(candidateBroker); localBrokerDataCache.get(path).thenAccept(reportData -> { if 
(reportData.isPresent()) { - LocalBrokerData lookupData = (LocalBrokerData) reportData.get(); + LocalBrokerData lookupData = reportData.get(); if (StringUtils.isNotBlank(advertisedListenerName)) { AdvertisedListener listener = lookupData.getAdvertisedListeners().get(advertisedListenerName); if (listener == null) { @@ -622,22 +657,36 @@ protected CompletableFuture createLookupResult(String candidateBro } private boolean isBrokerActive(String candidateBroker) { - List brokers = pulsar.getLocalMetadataStore().getChildren(LoadManager.LOADBALANCE_BROKERS_ROOT).join(); - - for (String brokerHostPort : brokers) { - if (candidateBroker.equals("http://" + brokerHostPort)) { - if (LOG.isDebugEnabled()) { - LOG.debug("Broker {} found for SLA Monitoring Namespace", brokerHostPort); - } - return true; + String candidateBrokerHostAndPort = parseHostAndPort(candidateBroker); + Set availableBrokers = getAvailableBrokers(); + if (availableBrokers.contains(candidateBrokerHostAndPort)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Broker {} ({}) is available for.", candidateBroker, candidateBrokerHostAndPort); } + return true; + } else { + LOG.warn("Broker {} ({}) couldn't be found in available brokers {}", + candidateBroker, candidateBrokerHostAndPort, + availableBrokers.stream().collect(Collectors.joining(","))); + return false; } + } - if (LOG.isDebugEnabled()) { - LOG.debug("Broker not found for SLA Monitoring Namespace {}", - candidateBroker + ":" + config.getWebServicePort()); + private static String parseHostAndPort(String candidateBroker) { + int uriSeparatorPos = candidateBroker.indexOf("://"); + if (uriSeparatorPos == -1) { + throw new IllegalArgumentException("'" + candidateBroker + "' isn't an URI."); + } + String candidateBrokerHostAndPort = candidateBroker.substring(uriSeparatorPos + 3); + return candidateBrokerHostAndPort; + } + + private Set getAvailableBrokers() { + try { + return loadManager.get().getAvailableBrokers(); + } catch (Exception e) { + throw new 
RuntimeException(e); } - return false; } /** @@ -646,7 +695,7 @@ private boolean isBrokerActive(String candidateBroker) { * @return * @throws Exception */ - private Optional getLeastLoadedFromLoadManager(ServiceUnitId serviceUnit) throws Exception { + private Optional> getLeastLoadedFromLoadManager(ServiceUnitId serviceUnit) throws Exception { Optional leastLoadedBroker = loadManager.get().getLeastLoaded(serviceUnit); if (!leastLoadedBroker.isPresent()) { LOG.warn("No broker is available for {}", serviceUnit); @@ -654,12 +703,14 @@ private Optional getLeastLoadedFromLoadManager(ServiceUnitId serviceUnit } String lookupAddress = leastLoadedBroker.get().getResourceId(); + String advertisedAddr = (String) leastLoadedBroker.get() + .getProperty(ResourceUnit.PROPERTY_KEY_BROKER_ZNODE_NAME); if (LOG.isDebugEnabled()) { LOG.debug("{} : redirecting to the least loaded broker, lookup address={}", pulsar.getSafeWebServiceAddress(), lookupAddress); } - return Optional.of(lookupAddress); + return Optional.of(Pair.of(lookupAddress, advertisedAddr)); } public CompletableFuture unloadNamespaceBundle(NamespaceBundle bundle) { @@ -858,6 +909,8 @@ void splitAndOwnBundleOnceAndRetry(NamespaceBundle bundle, // update bundled_topic cache for load-report-generation pulsar.getBrokerService().refreshTopicToStatsMaps(bundle); loadManager.get().setLoadReportForceUpdateFlag(); + // release old bundle from ownership cache + pulsar.getNamespaceService().getOwnershipCache().removeOwnership(bundle); completionFuture.complete(null); if (unload) { // Unload new split bundles, in background. 
This will not @@ -944,13 +997,26 @@ public CompletableFuture isServiceUnitOwnedAsync(ServiceUnitId suName) public boolean isServiceUnitActive(TopicName topicName) { try { - return ownershipCache.getOwnedBundle(getBundle(topicName)).isActive(); + OwnedBundle ownedBundle = ownershipCache.getOwnedBundle(getBundle(topicName)); + if (ownedBundle == null) { + return false; + } + return ownedBundle.isActive(); } catch (Exception e) { - LOG.warn("Unable to find OwnedBundle for topic - [{}]", topicName); + LOG.warn("Unable to find OwnedBundle for topic - [{}]", topicName, e); return false; } } + public CompletableFuture isServiceUnitActiveAsync(TopicName topicName) { + Optional> res = ownershipCache.getOwnedBundleAsync(getBundle(topicName)); + if (!res.isPresent()) { + return CompletableFuture.completedFuture(false); + } + + return res.get().thenApply(ob -> ob != null && ob.isActive()); + } + private boolean isNamespaceOwned(NamespaceName fqnn) throws Exception { return ownershipCache.getOwnedBundle(getFullBundle(fqnn)) != null; } @@ -988,7 +1054,7 @@ private boolean isTopicOwned(TopicName topicName) { public CompletableFuture checkTopicOwnership(TopicName topicName) { return getBundleAsync(topicName) - .thenApply(ownershipCache::checkOwnership); + .thenCompose(ownershipCache::checkOwnershipAsync); } public void removeOwnedServiceUnit(NamespaceBundle nsBundle) throws Exception { @@ -1079,9 +1145,25 @@ public CompletableFuture checkTopicExists(TopicName topic) { if (topic.isPersistent()) { return pulsar.getPulsarResources().getTopicResources().persistentTopicExists(topic); } else { - return pulsar.getBrokerService() - .getTopicIfExists(topic.toString()) - .thenApply(optTopic -> optTopic.isPresent()); + if (topic.isPartitioned()) { + final TopicName partitionedTopicName = TopicName.get(topic.getPartitionedTopicName()); + return pulsar.getBrokerService() + .fetchPartitionedTopicMetadataAsync(partitionedTopicName) + .thenApply((metadata) -> topic.getPartitionIndex() < 
metadata.partitions); + } else { + // only checks and don't do any topic creating and loading. + CompletableFuture> topicFuture = + pulsar.getBrokerService().getTopics().get(topic.toString()); + if (topicFuture == null) { + return CompletableFuture.completedFuture(false); + } else { + return topicFuture.thenApply(Optional::isPresent).exceptionally(throwable -> { + LOG.warn("[{}] topicFuture completed with exception when checkTopicExists, {}", + topic, throwable.getMessage()); + return false; + }); + } + } } } @@ -1200,6 +1282,11 @@ public PulsarClientImpl getNamespaceClient(ClusterDataImpl cluster) { .enableTcpNoDelay(false) .statsInterval(0, TimeUnit.SECONDS); + // Apply all arbitrary configuration. This must be called before setting any fields annotated as + // @Secret on the ClientConfigurationData object because of the way they are serialized. + // See https://github.com/apache/pulsar/issues/8509 for more information. + clientBuilder.loadConf(PropertiesUtils.filterAndMapProperties(config.getProperties(), "brokerClient_")); + if (pulsar.getConfiguration().isAuthenticationEnabled()) { clientBuilder.authentication(pulsar.getConfiguration().getBrokerClientAuthenticationPlugin(), pulsar.getConfiguration().getBrokerClientAuthenticationParameters()); @@ -1287,7 +1374,7 @@ public static NamespaceName getSLAMonitorNamespace(String host, ServiceConfigura public static String checkHeartbeatNamespace(ServiceUnitId ns) { Matcher m = HEARTBEAT_NAMESPACE_PATTERN.matcher(ns.getNamespaceObject().toString()); if (m.matches()) { - LOG.debug("SLAMonitoring namespace matched the lookup namespace {}", ns.getNamespaceObject().toString()); + LOG.debug("Heartbeat namespace matched the lookup namespace {}", ns.getNamespaceObject().toString()); return String.format("http://%s", m.group(1)); } else { return null; @@ -1297,7 +1384,7 @@ public static String checkHeartbeatNamespace(ServiceUnitId ns) { public static String checkHeartbeatNamespaceV2(ServiceUnitId ns) { Matcher m = 
HEARTBEAT_NAMESPACE_PATTERN_V2.matcher(ns.getNamespaceObject().toString()); if (m.matches()) { - LOG.debug("SLAMonitoring namespace matched the lookup namespace {}", ns.getNamespaceObject().toString()); + LOG.debug("Heartbeat namespace v2 matched the lookup namespace {}", ns.getNamespaceObject().toString()); return String.format("http://%s", m.group(1)); } else { return null; @@ -1313,6 +1400,12 @@ public static String getSLAMonitorBrokerName(ServiceUnitId ns) { } } + public static boolean isSystemServiceNamespace(String namespace) { + return HEARTBEAT_NAMESPACE_PATTERN.matcher(namespace).matches() + || HEARTBEAT_NAMESPACE_PATTERN_V2.matcher(namespace).matches() + || SLA_NAMESPACE_PATTERN.matcher(namespace).matches(); + } + public boolean registerSLANamespace() throws PulsarServerException { boolean isNameSpaceRegistered = registerNamespace(getSLAMonitorNamespace(host, config), false); if (isNameSpaceRegistered) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java index daedb712e299c..fc014414f5ea2 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/OwnershipCache.java @@ -148,8 +148,13 @@ public OwnershipCache(PulsarService pulsar, NamespaceBundleFactory bundleFactory * @param bundle namespace bundle * @return future that will complete with check result */ - public boolean checkOwnership(NamespaceBundle bundle) { - return getOwnedBundle(bundle) != null; + public CompletableFuture checkOwnershipAsync(NamespaceBundle bundle) { + Optional> ownedBundleFuture = getOwnedBundleAsync(bundle); + if (!ownedBundleFuture.isPresent()) { + return CompletableFuture.completedFuture(false); + } + return ownedBundleFuture.get() + .thenApply(bd -> bd != null && bd.isActive()); } /** @@ -277,6 +282,10 @@ public OwnedBundle 
getOwnedBundle(NamespaceBundle bundle) { } } + public Optional> getOwnedBundleAsync(NamespaceBundle bundle) { + return Optional.ofNullable(ownedBundlesCache.getIfPresent(bundle)); + } + /** * Disable bundle in local cache and on zk. * diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/protocol/ProtocolHandlerWithClassLoader.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/protocol/ProtocolHandlerWithClassLoader.java index 223cf81ccdb56..63aa6696917d9 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/protocol/ProtocolHandlerWithClassLoader.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/protocol/ProtocolHandlerWithClassLoader.java @@ -26,6 +26,7 @@ import lombok.Data; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.ClassLoaderSwitcher; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.service.BrokerService; import org.apache.pulsar.common.nar.NarClassLoader; @@ -95,22 +96,4 @@ public void close() { log.warn("Failed to close the protocol handler class loader", e); } } - - /** - * Help to switch the class loader of current thread to the NarClassLoader, and change it back when it's done. - * With the help of try-with-resources statement, the code would be cleaner than using try finally every time. 
- */ - private static class ClassLoaderSwitcher implements AutoCloseable { - private final ClassLoader prevClassLoader; - - ClassLoaderSwitcher(ClassLoader classLoader) { - prevClassLoader = Thread.currentThread().getContextClassLoader(); - Thread.currentThread().setContextClassLoader(classLoader); - } - - @Override - public void close() { - Thread.currentThread().setContextClassLoader(prevClassLoader); - } - } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupService.java index 677c04ad9a6f4..4900027d21810 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupService.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.resourcegroup; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import io.prometheus.client.Counter; import io.prometheus.client.Summary; import java.util.Map; @@ -575,7 +576,7 @@ protected void aggregateResourceGroupLocalUsages() { cancelStatus, this.aggregateLocalUsagePeriodInSeconds, newPeriodInSeconds, timeUnitScale); } this.aggreagteLocalUsagePeriodicTask = pulsar.getExecutor().scheduleAtFixedRate( - this::aggregateResourceGroupLocalUsages, + catchingAndLoggingThrowables(this::aggregateResourceGroupLocalUsages), newPeriodInSeconds, newPeriodInSeconds, timeUnitScale); @@ -665,7 +666,7 @@ protected void calculateQuotaForAllResourceGroups() { cancelStatus, this.resourceUsagePublishPeriodInSeconds, newPeriodInSeconds, timeUnitScale); } this.calculateQuotaPeriodicTask = pulsar.getExecutor().scheduleAtFixedRate( - this::calculateQuotaForAllResourceGroups, + catchingAndLoggingThrowables(this::calculateQuotaForAllResourceGroups), newPeriodInSeconds, newPeriodInSeconds, timeUnitScale); @@ -680,12 +681,12 @@ private void initialize() { long 
periodInSecs = config.getResourceUsageTransportPublishIntervalInSecs(); this.aggregateLocalUsagePeriodInSeconds = this.resourceUsagePublishPeriodInSeconds = periodInSecs; this.aggreagteLocalUsagePeriodicTask = this.pulsar.getExecutor().scheduleAtFixedRate( - this::aggregateResourceGroupLocalUsages, + catchingAndLoggingThrowables(this::aggregateResourceGroupLocalUsages), periodInSecs, periodInSecs, this.timeUnitScale); this.calculateQuotaPeriodicTask = this.pulsar.getExecutor().scheduleAtFixedRate( - this::calculateQuotaForAllResourceGroups, + catchingAndLoggingThrowables(this::calculateQuotaForAllResourceGroups), periodInSecs, periodInSecs, this.timeUnitScale); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImpl.java index ca83cae91c5ad..5dc50f2a25536 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImpl.java @@ -108,7 +108,7 @@ public boolean needToReportLocalUsage(long currentBytesUsed, long lastReportedBy final float toleratedDriftPercentage = ResourceGroupService.UsageReportSuppressionTolerancePercentage; if (currentBytesUsed > 0) { long diff = abs(currentBytesUsed - lastReportedBytes); - float diffPercentage = (diff / currentBytesUsed) * 100; + float diffPercentage = (float) diff * 100 / lastReportedBytes; if (diffPercentage > toleratedDriftPercentage) { return true; } @@ -116,7 +116,7 @@ public boolean needToReportLocalUsage(long currentBytesUsed, long lastReportedBy if (currentMessagesUsed > 0) { long diff = abs(currentMessagesUsed - lastReportedMessages); - float diffPercentage = (diff / currentMessagesUsed) * 100; + float diffPercentage = (float) diff * 100 / lastReportedMessages; if (diffPercentage > toleratedDriftPercentage) { return true; } 
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceUsageTopicTransportManager.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceUsageTopicTransportManager.java index d3584a81cafde..98269910fec3a 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceUsageTopicTransportManager.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceUsageTopicTransportManager.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker.resourcegroup; import static org.apache.pulsar.client.api.CompressionType.LZ4; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.collect.Sets; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; @@ -79,7 +80,7 @@ private Producer createProducer() throws PulsarClientException { public ResourceUsageWriterTask() throws PulsarClientException { producer = createProducer(); resourceUsagePublishTask = pulsarService.getExecutor().scheduleAtFixedRate( - this, + catchingAndLoggingThrowables(this), pulsarService.getConfig().getResourceUsageTransportPublishIntervalInSecs(), pulsarService.getConfig().getResourceUsageTransportPublishIntervalInSecs(), TimeUnit.SECONDS); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java index 76ce022346fc1..86e8956d950d7 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java @@ -21,7 +21,7 @@ import io.netty.buffer.ByteBuf; import java.io.IOException; import java.net.URI; -import java.net.URISyntaxException; +import java.net.URL; import java.nio.ByteBuffer; import java.sql.Time; import java.sql.Timestamp; @@ -41,6 +41,7 @@ import javax.ws.rs.container.AsyncResponse; import javax.ws.rs.core.Response; import 
javax.ws.rs.core.Response.Status; +import javax.ws.rs.core.UriBuilder; import lombok.extern.slf4j.Slf4j; import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericDatumReader; @@ -300,7 +301,7 @@ private void processPublishMessageResults(List produceMessageResult produceMessageResults.get(index).setMessageId(messageId.toString()); } catch (Exception e) { if (log.isDebugEnabled()) { - log.debug("Fail publish [{}] message with rest produce message request for topic {}: {} ", + log.debug("Fail publish [{}] message with rest produce message request for topic {}", index, topicName); } if (e instanceof BrokerServiceException.TopicNotFoundException) { @@ -378,10 +379,14 @@ private void processLookUpResult(List redirectAddresses, AsyncResponse log.debug("Redirect rest produce request for topic {} from {} to {}.", topicName, pulsar().getWebServiceAddress(), redirectAddresses.get(0)); } - URI redirectURI = new URI(String.format("%s%s", redirectAddresses.get(0), uri.getPath(false))); + URL redirectAddress = new URL(redirectAddresses.get(0)); + URI redirectURI = UriBuilder.fromUri(uri.getRequestUri()) + .host(redirectAddress.getHost()) + .port(redirectAddress.getPort()) + .build(); asyncResponse.resume(Response.temporaryRedirect(redirectURI).build()); future.complete(true); - } catch (URISyntaxException | NullPointerException e) { + } catch (Exception e) { if (log.isDebugEnabled()) { log.error("Error in preparing redirect url with rest produce message request for topic {}: {}", topicName, e.getMessage(), e); @@ -431,7 +436,8 @@ private CompletableFuture lookUpBrokerForTopic(TopicName partitionedTopicN partitionedTopicName, result.getLookupData()); } pulsar().getBrokerService().getOwningTopics().computeIfAbsent(partitionedTopicName - .getPartitionedTopicName(), (key) -> new ConcurrentOpenHashSet()) + .getPartitionedTopicName(), + (key) -> ConcurrentOpenHashSet.newBuilder().build()) .add(partitionedTopicName.getPartitionIndex()); 
completeLookup(Pair.of(Collections.emptyList(), false), redirectAddresses, future); } else { @@ -641,7 +647,7 @@ private List buildMessage(ProducerMessages producerMessages, Schema sch } } if (null != message.getEventTime() && !message.getEventTime().isEmpty()) { - messageMetadata.setEventTime(Long.valueOf(message.getEventTime())); + messageMetadata.setEventTime(Long.parseLong(message.getEventTime())); } if (message.isDisableReplication()) { messageMetadata.clearReplicateTo(); @@ -756,7 +762,8 @@ && pulsar().getBrokerService().isAuthorizationEnabled()) { } boolean isAuthorized = pulsar().getBrokerService().getAuthorizationService() - .canProduce(topicName, originalPrincipal(), clientAuthData()); + .canProduce(topicName, originalPrincipal() == null ? clientAppId() : originalPrincipal(), + clientAuthData()); if (!isAuthorized) { throw new RestException(Status.UNAUTHORIZED, String.format("Unauthorized to produce to topic %s" + " with clientAppId [%s] and authdata %s", topicName.toString(), diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java index f98cfe59c0813..3f5932a2d34bd 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractBaseDispatcher.java @@ -22,14 +22,17 @@ import io.netty.buffer.ByteBuf; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Optional; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.ManagedCursor; +import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.intercept.BrokerInterceptor; +import 
org.apache.pulsar.broker.service.persistent.CompactorSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.common.api.proto.CommandAck.AckType; @@ -126,18 +129,22 @@ public void filterEntriesForConsumer(Optional entryWrapper, int if (!isReplayRead && msgMetadata != null && msgMetadata.hasTxnidMostBits() && msgMetadata.hasTxnidLeastBits()) { if (Markers.isTxnMarker(msgMetadata)) { + // because consumer can receive message is smaller than maxReadPosition, + // so this marker is useless for this subscription + individualAcknowledgeMessageIfNeeded(entry.getPosition(), Collections.emptyMap()); entries.set(i, null); entry.release(); continue; } else if (((PersistentTopic) subscription.getTopic()) .isTxnAborted(new TxnID(msgMetadata.getTxnidMostBits(), msgMetadata.getTxnidLeastBits()))) { - subscription.acknowledgeMessage(Collections.singletonList(entry.getPosition()), AckType.Individual, - Collections.emptyMap()); + individualAcknowledgeMessageIfNeeded(entry.getPosition(), Collections.emptyMap()); entries.set(i, null); entry.release(); continue; } - } else if (msgMetadata == null || Markers.isServerOnlyMarker(msgMetadata)) { + } + + if (msgMetadata == null || Markers.isServerOnlyMarker(msgMetadata)) { PositionImpl pos = (PositionImpl) entry.getPosition(); // Message metadata was corrupted or the messages was a server-only marker @@ -147,11 +154,9 @@ public void filterEntriesForConsumer(Optional entryWrapper, int entries.set(i, null); entry.release(); - subscription.acknowledgeMessage(Collections.singletonList(pos), AckType.Individual, - Collections.emptyMap()); + individualAcknowledgeMessageIfNeeded(pos, Collections.emptyMap()); continue; - } else if (msgMetadata.hasDeliverAtTime() - && trackDelayedDelivery(entry.getLedgerId(), entry.getEntryId(), msgMetadata)) { + } else if (trackDelayedDelivery(entry.getLedgerId(), entry.getEntryId(), msgMetadata)) { // The 
message is marked for delayed delivery. Ignore for now. entries.set(i, null); entry.release(); @@ -184,6 +189,12 @@ && trackDelayedDelivery(entry.getLedgerId(), entry.getEntryId(), msgMetadata)) { sendMessageInfo.setTotalChunkedMessages(totalChunkedMessages); } + private void individualAcknowledgeMessageIfNeeded(Position position, Map properties) { + if (!(subscription instanceof CompactorSubscription)) { + subscription.acknowledgeMessage(Collections.singletonList(position), AckType.Individual, properties); + } + } + /** * Determine whether the number of consumers on the subscription reaches the threshold. * @return diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractDispatcherSingleActiveConsumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractDispatcherSingleActiveConsumer.java index 690a5984b4828..8cab06be116af 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractDispatcherSingleActiveConsumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractDispatcherSingleActiveConsumer.java @@ -26,6 +26,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; +import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.service.BrokerServiceException.ConsumerBusyException; import org.apache.pulsar.broker.service.BrokerServiceException.ServerMetadataException; @@ -45,7 +46,7 @@ public abstract class AbstractDispatcherSingleActiveConsumer extends AbstractBas protected boolean isKeyHashRangeFiltered = false; protected CompletableFuture closeFuture = null; protected final int partitionIndex; - + protected final ManagedCursor cursor; // This dispatcher supports both the Exclusive and Failover subscription types protected final SubType subscriptionType; @@ -59,12 
+60,13 @@ public abstract class AbstractDispatcherSingleActiveConsumer extends AbstractBas public AbstractDispatcherSingleActiveConsumer(SubType subscriptionType, int partitionIndex, String topicName, Subscription subscription, - ServiceConfiguration serviceConfig) { + ServiceConfiguration serviceConfig, ManagedCursor cursor) { super(subscription, serviceConfig); this.topicName = topicName; this.consumers = new CopyOnWriteArrayList<>(); this.partitionIndex = partitionIndex; this.subscriptionType = subscriptionType; + this.cursor = cursor; ACTIVE_CONSUMER_UPDATER.set(this, null); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractReplicator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractReplicator.java index b7e85dadb2464..e4b00cb992e77 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractReplicator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractReplicator.java @@ -32,6 +32,7 @@ import org.apache.pulsar.client.impl.ProducerImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.util.FutureUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,14 +65,14 @@ protected enum State { } public AbstractReplicator(String topicName, String replicatorPrefix, String localCluster, String remoteCluster, - BrokerService brokerService) throws NamingException, PulsarServerException { - validatePartitionedTopic(topicName, brokerService); + BrokerService brokerService, PulsarClientImpl replicationClient) + throws PulsarServerException { this.brokerService = brokerService; this.topicName = topicName; this.replicatorPrefix = replicatorPrefix; this.localCluster = localCluster.intern(); this.remoteCluster = remoteCluster.intern(); - this.replicationClient = (PulsarClientImpl) brokerService.getReplicationClient(remoteCluster); + this.replicationClient = 
replicationClient; this.client = (PulsarClientImpl) brokerService.pulsar().getClient(); this.producer = null; this.producerQueueSize = brokerService.pulsar().getConfiguration().getReplicationProducerQueueSize(); @@ -82,7 +83,8 @@ public AbstractReplicator(String topicName, String replicatorPrefix, String loca .enableBatching(false) .sendTimeout(0, TimeUnit.SECONDS) // .maxPendingMessages(producerQueueSize) // - .producerName(getReplicatorName(replicatorPrefix, localCluster)); + .producerName(String.format("%s%s%s", getReplicatorName(replicatorPrefix, localCluster), + REPL_PRODUCER_NAME_DELIMITER, remoteCluster)); STATE_UPDATER.set(this, State.Stopped); } @@ -180,8 +182,10 @@ public synchronized CompletableFuture disconnect(boolean failIfHasBacklog) if (failIfHasBacklog && getNumberOfEntriesInBacklog() > 0) { CompletableFuture disconnectFuture = new CompletableFuture<>(); disconnectFuture.completeExceptionally(new TopicBusyException("Cannot close a replicator with backlog")); - log.debug("[{}][{} -> {}] Replicator disconnect failed since topic has backlog", topicName, localCluster, - remoteCluster); + if (log.isDebugEnabled()) { + log.debug("[{}][{} -> {}] Replicator disconnect failed since topic has backlog", topicName, localCluster + , remoteCluster); + } return disconnectFuture; } @@ -205,7 +209,7 @@ public synchronized CompletableFuture disconnect(boolean failIfHasBacklog) public CompletableFuture remove() { // No-op - return null; + return CompletableFuture.completedFuture(null); } protected boolean isWritable() { @@ -242,20 +246,18 @@ public static String getReplicatorName(String replicatorPrefix, String cluster) * @param topic * @param brokerService */ - private void validatePartitionedTopic(String topic, BrokerService brokerService) throws NamingException { + public static CompletableFuture validatePartitionedTopicAsync(String topic, BrokerService brokerService) { TopicName topicName = TopicName.get(topic); - boolean isPartitionedTopic = false; - try { - 
isPartitionedTopic = - brokerService.pulsar().getPulsarResources().getNamespaceResources().getPartitionedTopicResources() - .partitionedTopicExists(topicName); - } catch (Exception e) { - log.warn("Failed to verify partitioned topic {}-{}", topicName, e.getMessage()); - } - if (isPartitionedTopic) { - throw new NamingException( - topicName + " is a partitioned-topic and replication can't be started for partitioned-producer "); - } + return brokerService.pulsar().getPulsarResources().getNamespaceResources().getPartitionedTopicResources() + .partitionedTopicExistsAsync(topicName).thenCompose(isPartitionedTopic -> { + if (isPartitionedTopic) { + String s = topicName + + " is a partitioned-topic and replication can't be started for partitioned-producer "; + log.error(s); + return FutureUtil.failedFuture(new NamingException(s)); + } + return CompletableFuture.completedFuture(null); + }); } private static final Logger log = LoggerFactory.getLogger(AbstractReplicator.class); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractSubscription.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractSubscription.java new file mode 100644 index 0000000000000..6a38667055679 --- /dev/null +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractSubscription.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.service; + +import java.util.Optional; +import java.util.concurrent.atomic.LongAdder; +import java.util.function.ToLongFunction; + +public abstract class AbstractSubscription implements Subscription { + protected final LongAdder bytesOutFromRemovedConsumers = new LongAdder(); + protected final LongAdder msgOutFromRemovedConsumer = new LongAdder(); + + public long getMsgOutCounter() { + return msgOutFromRemovedConsumer.longValue() + sumConsumers(Consumer::getMsgOutCounter); + } + + public long getBytesOutCounter() { + return bytesOutFromRemovedConsumers.longValue() + sumConsumers(Consumer::getBytesOutCounter); + } + + private long sumConsumers(ToLongFunction toCounter) { + return Optional.ofNullable(getDispatcher()) + .map(dispatcher -> dispatcher.getConsumers().stream().mapToLong(toCounter).sum()) + .orElse(0L); + } +} diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java index 951d6025eb51b..7c8d3b2422372 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/AbstractTopic.java @@ -30,10 +30,10 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.LongAdder; 
import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.ToLongFunction; import lombok.Getter; import org.apache.bookkeeper.mledger.util.StatsBuckets; import org.apache.commons.lang3.tuple.Pair; @@ -93,9 +93,11 @@ public abstract class AbstractTopic implements Topic { // Whether messages published must be encrypted or not in this topic protected volatile boolean isEncryptionRequired = false; + + @Getter protected volatile SchemaCompatibilityStrategy schemaCompatibilityStrategy = SchemaCompatibilityStrategy.FULL; - protected volatile boolean isAllowAutoUpdateSchema = true; + protected volatile Boolean isAllowAutoUpdateSchema; // schema validation enforced flag protected volatile boolean schemaValidationEnforced = false; @@ -115,6 +117,10 @@ public abstract class AbstractTopic implements Topic { private LongAdder bytesInCounter = new LongAdder(); private LongAdder msgInCounter = new LongAdder(); + private static final AtomicLongFieldUpdater RATE_LIMITED_UPDATER = + AtomicLongFieldUpdater.newUpdater(AbstractTopic.class, "publishRateLimitedTimes"); + protected volatile long publishRateLimitedTimes = 0; + protected volatile Optional topicEpoch = Optional.empty(); private volatile boolean hasExclusiveProducer; // pointer to the exclusive producer @@ -131,6 +137,9 @@ public abstract class AbstractTopic implements Topic { private volatile long lastTopicMaxMessageSizeCheckTimeStamp = 0; private final long topicMaxMessageSizeCheckIntervalMs; + protected final LongAdder msgOutFromRemovedSubscriptions = new LongAdder(); + protected final LongAdder bytesOutFromRemovedSubscriptions = new LongAdder(); + public AbstractTopic(String topic, BrokerService brokerService) { this.topic = topic; this.brokerService = brokerService; @@ -149,6 +158,10 @@ public AbstractTopic(String topic, BrokerService brokerService) { this.preciseTopicPublishRateLimitingEnable = brokerService.pulsar().getConfiguration().isPreciseTopicPublishRateLimiterEnable(); 
updatePublishDispatcher(Optional.empty()); + if (isSystemTopic()) { + schemaCompatibilityStrategy = + brokerService.pulsar().getConfig().getSystemTopicSchemaCompatibilityStrategy(); + } } protected boolean isProducersExceeded() { @@ -251,6 +264,14 @@ protected CompletableFuture addConsumerToSubscription(Subscription subscri return subscription.addConsumer(consumer); } + protected Consumer getActiveConsumer(Subscription subscription) { + Dispatcher dispatcher = subscription.getDispatcher(); + if (dispatcher instanceof AbstractDispatcherSingleActiveConsumer) { + return ((AbstractDispatcherSingleActiveConsumer) dispatcher).getActiveConsumer(); + } + return null; + } + @Override public void disableCnxAutoRead() { producers.values().forEach(producer -> producer.getCnx().disableCnxAutoRead()); @@ -262,14 +283,15 @@ public void enableCnxAutoRead() { } protected boolean hasLocalProducers() { - AtomicBoolean foundLocal = new AtomicBoolean(false); - producers.values().forEach(producer -> { + if (producers.isEmpty()) { + return false; + } + for (Producer producer : producers.values()) { if (!producer.isRemote()) { - foundLocal.set(true); + return true; } - }); - - return foundLocal.get(); + } + return false; } @Override @@ -328,20 +350,31 @@ public CompletableFuture addSchema(SchemaData schema) { String base = TopicName.get(getName()).getPartitionedTopicName(); String id = TopicName.get(base).getSchemaName(); SchemaRegistryService schemaRegistryService = brokerService.pulsar().getSchemaRegistryService(); - return isAllowAutoUpdateSchema ? 
schemaRegistryService - .putSchemaIfAbsent(id, schema, schemaCompatibilityStrategy) - : schemaRegistryService.trimDeletedSchemaAndGetList(id).thenCompose(schemaAndMetadataList -> - schemaRegistryService.getSchemaVersionBySchemaData(schemaAndMetadataList, schema) - .thenCompose(schemaVersion -> { - if (schemaVersion == null) { - return FutureUtil - .failedFuture( - new IncompatibleSchemaException( - "Schema not found and schema auto updating is disabled.")); - } else { - return CompletableFuture.completedFuture(schemaVersion); - } - })); + + if (allowAutoUpdateSchema()) { + return schemaRegistryService.putSchemaIfAbsent(id, schema, schemaCompatibilityStrategy); + } else { + return schemaRegistryService.trimDeletedSchemaAndGetList(id).thenCompose(schemaAndMetadataList -> + schemaRegistryService.getSchemaVersionBySchemaData(schemaAndMetadataList, schema) + .thenCompose(schemaVersion -> { + if (schemaVersion == null) { + return FutureUtil.failedFuture(new IncompatibleSchemaException( + "Schema not found and schema auto updating is disabled.")); + } else { + return CompletableFuture.completedFuture(schemaVersion); + } + })); + } + } + + private boolean allowAutoUpdateSchema() { + if (brokerService.isSystemTopic(topic)) { + return true; + } + if (isAllowAutoUpdateSchema == null) { + return brokerService.pulsar().getConfig().isAllowAutoUpdateSchemaEnabled(); + } + return isAllowAutoUpdateSchema; } @Override @@ -521,17 +554,22 @@ public void recordAddLatency(long latency, TimeUnit unit) { } protected void setSchemaCompatibilityStrategy(Policies policies) { - if (policies.schema_compatibility_strategy == SchemaCompatibilityStrategy.UNDEFINED) { - schemaCompatibilityStrategy = brokerService.pulsar() - .getConfig().getSchemaCompatibilityStrategy(); - if (schemaCompatibilityStrategy == SchemaCompatibilityStrategy.UNDEFINED) { - schemaCompatibilityStrategy = SchemaCompatibilityStrategy.fromAutoUpdatePolicy( - policies.schema_auto_update_compatibility_strategy); + if 
(isSystemTopic()) { + schemaCompatibilityStrategy = + brokerService.pulsar().getConfig().getSystemTopicSchemaCompatibilityStrategy(); + return; + } + + schemaCompatibilityStrategy = policies.schema_compatibility_strategy; + if (SchemaCompatibilityStrategy.isUndefined(schemaCompatibilityStrategy)) { + schemaCompatibilityStrategy = SchemaCompatibilityStrategy.fromAutoUpdatePolicy( + policies.schema_auto_update_compatibility_strategy); + if (SchemaCompatibilityStrategy.isUndefined(schemaCompatibilityStrategy)) { + schemaCompatibilityStrategy = brokerService.pulsar().getConfig().getSchemaCompatibilityStrategy(); } - } else { - schemaCompatibilityStrategy = policies.schema_compatibility_strategy; } } + private static final Summary PUBLISH_LATENCY = Summary.build("pulsar_broker_publish_latency", "-") .quantile(0.0) .quantile(0.50) @@ -608,6 +646,11 @@ protected void checkTopicFenced() throws BrokerServiceException { } } + @Override + public long increasePublishLimitedTimes() { + return RATE_LIMITED_UPDATER.incrementAndGet(this); + } + protected void internalAddProducer(Producer producer) throws BrokerServiceException { if (isProducersExceeded()) { log.warn("[{}] Attempting to add producer to topic which reached max producers limit", topic); @@ -631,13 +674,9 @@ protected void internalAddProducer(Producer producer) throws BrokerServiceExcept private void tryOverwriteOldProducer(Producer oldProducer, Producer newProducer) throws BrokerServiceException { - boolean canOverwrite = false; - if (oldProducer.equals(newProducer) && !isUserProvidedProducerName(oldProducer) - && !isUserProvidedProducerName(newProducer) && newProducer.getEpoch() > oldProducer.getEpoch()) { + if (newProducer.isSuccessorTo(oldProducer) && !isUserProvidedProducerName(oldProducer) + && !isUserProvidedProducerName(newProducer)) { oldProducer.close(false); - canOverwrite = true; - } - if (canOverwrite) { if (!producers.replace(newProducer.getProducerName(), oldProducer, newProducer)) { // Met concurrent 
update, throw exception here so that client can try reconnect later. throw new BrokerServiceException.NamingException("Producer with name '" + newProducer.getProducerName() @@ -767,7 +806,7 @@ public PublishRateLimiter getBrokerPublishRateLimiter() { } public void updateMaxPublishRate(Policies policies) { - updatePublishDispatcher(Optional.of(policies)); + updatePublishDispatcher(Optional.ofNullable(policies)); } private void updatePublishDispatcher(Optional optPolicies) { @@ -812,9 +851,7 @@ private void updatePublishDispatcher(Optional optPolicies) { } // attach the resource-group level rate limiters, if set - String rgName = policies != null && policies.resource_group_name != null - ? policies.resource_group_name - : null; + String rgName = policies.resource_group_name; if (rgName != null) { final ResourceGroup resourceGroup = brokerService.getPulsar().getResourceGroupServiceManager().resourceGroupGet(rgName); @@ -846,11 +883,20 @@ public long getBytesInCounter() { } public long getMsgOutCounter() { - return getStats(false, false).msgOutCounter; + return msgOutFromRemovedSubscriptions.longValue() + + sumSubscriptions(AbstractSubscription::getMsgOutCounter); } public long getBytesOutCounter() { - return getStats(false, false).bytesOutCounter; + return bytesOutFromRemovedSubscriptions.longValue() + + sumSubscriptions(AbstractSubscription::getBytesOutCounter); + } + + private long sumSubscriptions(ToLongFunction toCounter) { + return getSubscriptions().values().stream() + .map(AbstractSubscription.class::cast) + .mapToLong(toCounter) + .sum(); } public boolean isDeleteWhileInactive() { @@ -934,6 +980,9 @@ protected void updatePublishDispatcher(PublishRate publishRate) { } } else { log.info("Disabling publish throttling for {}", this.topic); + if (topicPublishRateLimiter != null) { + topicPublishRateLimiter.close(); + } this.topicPublishRateLimiter = PublishRateLimiter.DISABLED_RATE_LIMITER; enableProducerReadForPublishRateLimiting(); } diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BacklogQuotaManager.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BacklogQuotaManager.java index 042f9ff0b3ffa..ee12c3ff7438d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BacklogQuotaManager.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BacklogQuotaManager.java @@ -26,9 +26,10 @@ import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedCursor.IndividualDeletedEntries; +import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; -import org.apache.bookkeeper.mledger.proto.MLDataFormats; +import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.resources.NamespaceResources; import org.apache.pulsar.broker.service.persistent.PersistentTopic; @@ -238,17 +239,26 @@ private void dropBacklogForTimeLimit(PersistentTopic persistentTopic, BacklogQuo Long currentMillis = ((ManagedLedgerImpl) persistentTopic.getManagedLedger()).getClock().millis(); ManagedLedgerImpl mLedger = (ManagedLedgerImpl) persistentTopic.getManagedLedger(); try { - Long ledgerId = mLedger.getCursors().getSlowestReaderPosition().getLedgerId(); - MLDataFormats.ManagedLedgerInfo.LedgerInfo ledgerInfo = mLedger.getLedgerInfo(ledgerId).get(); - // Timestamp only > 0 if ledger has been closed - while (ledgerInfo.getTimestamp() > 0 - && currentMillis - ledgerInfo.getTimestamp() > quota.getLimitTime()) { + for (;;) { ManagedCursor slowestConsumer = mLedger.getSlowestConsumer(); - // skip whole ledger for the slowest cursor - slowestConsumer.resetCursor(mLedger.getNextValidPosition( - PositionImpl.get(ledgerInfo.getLedgerId(), ledgerInfo.getEntries() - 1))); - ledgerId = 
mLedger.getCursors().getSlowestReaderPosition().getLedgerId(); - ledgerInfo = mLedger.getLedgerInfo(ledgerId).get(); + Position oldestPosition = slowestConsumer.getMarkDeletedPosition(); + ManagedLedgerInfo.LedgerInfo ledgerInfo = mLedger.getLedgerInfo(oldestPosition.getLedgerId()).get(); + if (ledgerInfo == null) { + slowestConsumer.resetCursor(mLedger.getNextValidPosition((PositionImpl) oldestPosition)); + continue; + } + // Timestamp only > 0 if ledger has been closed + if (ledgerInfo.getTimestamp() > 0 + && currentMillis - ledgerInfo.getTimestamp() > quota.getLimitTime()) { + // skip whole ledger for the slowest cursor + PositionImpl nextPosition = mLedger.getNextValidPosition( + PositionImpl.get(ledgerInfo.getLedgerId(), ledgerInfo.getEntries() - 1)); + if (!nextPosition.equals(oldestPosition)) { + slowestConsumer.resetCursor(nextPosition); + continue; + } + } + break; } } catch (Exception e) { log.error("[{}] Error resetting cursor for slowest consumer [{}]", persistentTopic.getName(), diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java index c3a241d6077ee..3418d9f08e391 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java @@ -23,7 +23,7 @@ import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun; import static org.apache.commons.collections.CollectionUtils.isEmpty; import static org.apache.commons.lang3.StringUtils.isNotBlank; -import static org.apache.pulsar.common.events.EventsTopicNames.checkTopicIsEventsNames; +import static org.apache.pulsar.broker.PulsarService.isTransactionSystemTopic; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -76,7 +76,6 @@ import lombok.Getter; import lombok.Setter; import 
org.apache.bookkeeper.common.util.OrderedExecutor; -import org.apache.bookkeeper.common.util.OrderedScheduler; import org.apache.bookkeeper.mledger.AsyncCallbacks.DeleteLedgerCallback; import org.apache.bookkeeper.mledger.AsyncCallbacks.OpenLedgerCallback; import org.apache.bookkeeper.mledger.LedgerOffloader; @@ -85,6 +84,7 @@ import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.ManagedLedgerException.ManagedLedgerNotFoundException; import org.apache.bookkeeper.mledger.ManagedLedgerFactory; +import org.apache.bookkeeper.mledger.impl.NullLedgerOffloader; import org.apache.bookkeeper.mledger.util.Futures; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.ImmutablePair; @@ -101,6 +101,7 @@ import org.apache.pulsar.broker.intercept.BrokerInterceptor; import org.apache.pulsar.broker.intercept.ManagedLedgerInterceptorImpl; import org.apache.pulsar.broker.loadbalance.LoadManager; +import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.resources.LocalPoliciesResources; import org.apache.pulsar.broker.resources.NamespaceResources; import org.apache.pulsar.broker.resources.NamespaceResources.PartitionedTopicResources; @@ -116,7 +117,7 @@ import org.apache.pulsar.broker.stats.ClusterReplicationMetrics; import org.apache.pulsar.broker.stats.prometheus.metrics.ObserverGauge; import org.apache.pulsar.broker.stats.prometheus.metrics.Summary; -import org.apache.pulsar.broker.systopic.SystemTopicClient; +import org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckStore; import org.apache.pulsar.broker.validator.BindAddressValidator; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminBuilder; @@ -125,9 +126,11 @@ import org.apache.pulsar.client.impl.ClientBuilderImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import 
org.apache.pulsar.client.internal.PropertiesUtils; import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.configuration.BindAddress; import org.apache.pulsar.common.configuration.FieldContext; +import org.apache.pulsar.common.events.EventsTopicNames; import org.apache.pulsar.common.intercept.AppendIndexMetadataInterceptor; import org.apache.pulsar.common.intercept.BrokerEntryMetadataInterceptor; import org.apache.pulsar.common.intercept.BrokerEntryMetadataUtils; @@ -214,6 +217,7 @@ public class BrokerService implements Closeable { private AuthorizationService authorizationService = null; private final ScheduledExecutorService statsUpdater; + @Getter private final ScheduledExecutorService backlogQuotaChecker; protected final AtomicReference lookupRequestSemaphore; @@ -273,19 +277,30 @@ public BrokerService(PulsarService pulsar, EventLoopGroup eventLoopGroup) throws this.preciseTopicPublishRateLimitingEnable = pulsar.getConfiguration().isPreciseTopicPublishRateLimiterEnable(); this.managedLedgerFactory = pulsar.getManagedLedgerFactory(); - this.topics = new ConcurrentOpenHashMap<>(); - this.replicationClients = new ConcurrentOpenHashMap<>(); - this.clusterAdmins = new ConcurrentOpenHashMap<>(); + this.topics = + ConcurrentOpenHashMap.>>newBuilder() + .build(); + this.replicationClients = + ConcurrentOpenHashMap.newBuilder().build(); + this.clusterAdmins = + ConcurrentOpenHashMap.newBuilder().build(); this.keepAliveIntervalSeconds = pulsar.getConfiguration().getKeepAliveIntervalSeconds(); - this.configRegisteredListeners = new ConcurrentOpenHashMap<>(); + this.configRegisteredListeners = + ConcurrentOpenHashMap.>newBuilder().build(); this.pendingTopicLoadingQueue = Queues.newConcurrentLinkedQueue(); - this.multiLayerTopicsMap = new ConcurrentOpenHashMap<>(); - this.owningTopics = new ConcurrentOpenHashMap<>(); + this.multiLayerTopicsMap = ConcurrentOpenHashMap.>>newBuilder() + .build(); + this.owningTopics = 
ConcurrentOpenHashMap.>newBuilder() + .build(); this.pulsarStats = new PulsarStats(pulsar); - this.offlineTopicStatCache = new ConcurrentOpenHashMap<>(); + this.offlineTopicStatCache = + ConcurrentOpenHashMap.newBuilder().build(); - this.topicOrderedExecutor = OrderedScheduler.newSchedulerBuilder() + this.topicOrderedExecutor = OrderedExecutor.newBuilder() .numThreads(pulsar.getConfiguration().getNumWorkerThreadsForNonPersistentTopic()) .name("broker-topic-workers").build(); final DefaultThreadFactory acceptorThreadFactory = new DefaultThreadFactory("pulsar-acceptor"); @@ -315,7 +330,8 @@ public BrokerService(PulsarService pulsar, EventLoopGroup eventLoopGroup) throws this.backlogQuotaChecker = Executors .newSingleThreadScheduledExecutor(new DefaultThreadFactory("pulsar-backlog-quota-checker")); this.authenticationService = new AuthenticationService(pulsar.getConfiguration()); - this.blockedDispatchers = new ConcurrentOpenHashSet<>(); + this.blockedDispatchers = + ConcurrentOpenHashSet.newBuilder().build(); // update dynamic configuration and register-listener updateConfigurationAndRegisterListeners(); this.lookupRequestSemaphore = new AtomicReference( @@ -330,7 +346,7 @@ public BrokerService(PulsarService pulsar, EventLoopGroup eventLoopGroup) throws log.info("Enabling per-broker unack-message limit {} and dispatcher-limit {} on blocked-broker", maxUnackedMessages, maxUnackedMsgsPerDispatcher); // block misbehaving dispatcher by checking periodically - pulsar.getExecutor().scheduleAtFixedRate(() -> checkUnAckMessageDispatching(), + pulsar.getExecutor().scheduleAtFixedRate(safeRun(this::checkUnAckMessageDispatching), 600, 30, TimeUnit.SECONDS); } else { this.maxUnackedMessages = 0; @@ -829,15 +845,17 @@ public void unloadNamespaceBundlesGracefully() { // unload all namespace-bundles gracefully long closeTopicsStartTime = System.nanoTime(); Set serviceUnits = pulsar.getNamespaceService().getOwnedServiceUnits(); - serviceUnits.forEach(su -> { - if (su instanceof 
NamespaceBundle) { - try { - pulsar.getNamespaceService().unloadNamespaceBundle(su, 1, TimeUnit.MINUTES).get(); - } catch (Exception e) { - log.warn("Failed to unload namespace bundle {}", su, e); + if (serviceUnits != null) { + serviceUnits.forEach(su -> { + if (su instanceof NamespaceBundle) { + try { + pulsar.getNamespaceService().unloadNamespaceBundle(su, 1, TimeUnit.MINUTES).get(); + } catch (Exception e) { + log.warn("Failed to unload namespace bundle {}", su, e); + } } - } - }); + }); + } double closeTopicsTimeSeconds = TimeUnit.NANOSECONDS.toMillis((System.nanoTime() - closeTopicsStartTime)) / 1000.0; @@ -970,6 +988,9 @@ public CompletableFuture deleteTopic(String topic, boolean forceDelete, bo } } + if (log.isDebugEnabled()) { + log.debug("Topic {} is not loaded, try to delete from metadata", topic); + } // Topic is not loaded, though we still might be able to delete from metadata TopicName tn = TopicName.get(topic); if (!tn.isPersistent()) { @@ -978,22 +999,78 @@ public CompletableFuture deleteTopic(String topic, boolean forceDelete, bo } CompletableFuture future = new CompletableFuture<>(); - managedLedgerFactory.asyncDelete(tn.getPersistenceNamingEncoding(), new DeleteLedgerCallback() { - @Override - public void deleteLedgerComplete(Object ctx) { - future.complete(null); - } - @Override - public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { - future.completeExceptionally(exception); + CompletableFuture deleteTopicAuthenticationFuture = new CompletableFuture<>(); + deleteTopicAuthenticationWithRetry(topic, deleteTopicAuthenticationFuture, 5); + deleteTopicAuthenticationFuture.whenComplete((v, ex) -> { + if (ex != null) { + future.completeExceptionally(ex); + return; } - }, null); + managedLedgerFactory.asyncDelete(tn.getPersistenceNamingEncoding(), new DeleteLedgerCallback() { + @Override + public void deleteLedgerComplete(Object ctx) { + future.complete(null); + } + + @Override + public void 
deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { + future.completeExceptionally(exception); + } + }, null); + }); + return future; } + public void deleteTopicAuthenticationWithRetry(String topic, CompletableFuture future, int count) { + if (count == 0) { + log.error("The number of retries has exhausted for topic {}", topic); + future.completeExceptionally(new MetadataStoreException("The number of retries has exhausted")); + return; + } + NamespaceName namespaceName = TopicName.get(topic).getNamespaceObject(); + // Check whether there are auth policies for the topic + pulsar.getPulsarResources().getNamespaceResources().getPoliciesAsync(namespaceName).thenAccept(optPolicies -> { + if (!optPolicies.isPresent() || !optPolicies.get().auth_policies.getTopicAuthentication() + .containsKey(topic)) { + // if there is no auth policy for the topic, just complete and return + if (log.isDebugEnabled()) { + log.debug("Authentication policies not found for topic {}", topic); + } + future.complete(null); + return; + } + pulsar.getPulsarResources().getNamespaceResources() + .setPoliciesAsync(TopicName.get(topic).getNamespaceObject(), p -> { + p.auth_policies.getTopicAuthentication().remove(topic); + return p; + }).thenAccept(v -> { + log.info("Successfully delete authentication policies for topic {}", topic); + future.complete(null); + }).exceptionally(ex1 -> { + if (ex1.getCause() instanceof MetadataStoreException.BadVersionException) { + log.warn( + "Failed to delete authentication policies because of bad version. 
" + + "Retry to delete authentication policies for topic {}", + topic); + deleteTopicAuthenticationWithRetry(topic, future, count - 1); + } else { + log.error("Failed to delete authentication policies for topic {}", topic, ex1); + future.completeExceptionally(ex1); + } + return null; + }); + }).exceptionally(ex -> { + log.error("Failed to get policies for topic {}", topic, ex); + future.completeExceptionally(ex); + return null; + }); + } + private CompletableFuture> createNonPersistentTopic(String topic) { + CompletableFuture> topicFuture = new CompletableFuture<>(); if (!pulsar.getConfiguration().isEnableNonPersistentTopics()) { if (log.isDebugEnabled()) { log.debug("Broker is unable to load non-persistent topic {}", topic); @@ -1003,27 +1080,40 @@ private CompletableFuture> createNonPersistentTopic(String topic } final long topicCreateTimeMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); NonPersistentTopic nonPersistentTopic = new NonPersistentTopic(topic, this); - - CompletableFuture> future = nonPersistentTopic.initialize() - .thenCompose(__ -> nonPersistentTopic.checkReplication()) - .thenApply(__ -> { - log.info("Created topic {}", nonPersistentTopic); - long topicLoadLatencyMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) - topicCreateTimeMs; - pulsarStats.recordTopicLoadTimeValue(topic, topicLoadLatencyMs); - addTopicToStatsMaps(TopicName.get(topic), nonPersistentTopic); - return Optional.of(nonPersistentTopic); - }); - - future.exceptionally((ex) -> { - log.warn("Replication check failed. 
Removing topic from topics list {}, {}", topic, ex); - nonPersistentTopic.stopReplProducers().whenComplete((v, exception) -> { - pulsar.getExecutor().execute(() -> topics.remove(topic, future)); + CompletableFuture isOwner = checkTopicNsOwnership(topic); + isOwner.thenRun(() -> { + nonPersistentTopic.initialize() + .thenCompose(__ -> nonPersistentTopic.checkReplication()) + .thenRun(() -> { + log.info("Created topic {}", nonPersistentTopic); + long topicLoadLatencyMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) - topicCreateTimeMs; + pulsarStats.recordTopicLoadTimeValue(topic, topicLoadLatencyMs); + addTopicToStatsMaps(TopicName.get(topic), nonPersistentTopic); + topicFuture.complete(Optional.of(nonPersistentTopic)); + }).exceptionally(ex -> { + log.warn("Replication check failed. Removing topic from topics list {}, {}", topic, ex.getCause()); + nonPersistentTopic.stopReplProducers().whenComplete((v, exception) -> { + pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture)); + topicFuture.completeExceptionally(ex); + }); + return null; }); - + }).exceptionally(e -> { + log.warn("CheckTopicNsOwnership fail when createNonPersistentTopic! {}", topic, e.getCause()); + // CheckTopicNsOwnership fail dont create nonPersistentTopic, when topic do lookup will find the correct + // broker. When client get non-persistent-partitioned topic + // metadata will the non-persistent-topic will be created. 
+ // so we should add checkTopicNsOwnership logic otherwise the topic will be created + // if it dont own by this broker,we should return success + // otherwise it will keep retrying getPartitionedTopicMetadata + topicFuture.complete(Optional.of(nonPersistentTopic)); + // after get metadata return success, we should delete this topic from this broker, because this topic not + // owner by this broker and it don't initialize and checkReplication + pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture)); return null; }); - return future; + return topicFuture; } private CompletableFuture futureWithDeadline() { @@ -1031,7 +1121,7 @@ private CompletableFuture futureWithDeadline() { () -> FUTURE_DEADLINE_TIMEOUT_EXCEPTION); } - public PulsarClient getReplicationClient(String cluster) { + public PulsarClient getReplicationClient(String cluster, Optional clusterDataOp) { PulsarClient client = replicationClients.get(cluster); if (client != null) { return client; @@ -1039,12 +1129,18 @@ public PulsarClient getReplicationClient(String cluster) { return replicationClients.computeIfAbsent(cluster, key -> { try { - ClusterData data = pulsar.getPulsarResources().getClusterResources().getCluster(cluster) + ClusterData data = clusterDataOp .orElseThrow(() -> new MetadataStoreException.NotFoundException(cluster)); ClientBuilder clientBuilder = PulsarClient.builder() .enableTcpNoDelay(false) .connectionsPerBroker(pulsar.getConfiguration().getReplicationConnectionsPerBroker()) .statsInterval(0, TimeUnit.SECONDS); + // Apply all arbitrary configuration. This must be called before setting any fields annotated as + // @Secret on the ClientConfigurationData object because of the way they are serialized. + // See https://github.com/apache/pulsar/issues/8509 for more information. 
+ clientBuilder.loadConf(PropertiesUtils.filterAndMapProperties(pulsar.getConfiguration().getProperties(), + "brokerClient_")); + if (data.getAuthenticationPlugin() != null && data.getAuthenticationParameters() != null) { clientBuilder.authentication(data.getAuthenticationPlugin(), data.getAuthenticationParameters()); } else if (pulsar.getConfiguration().isAuthenticationEnabled()) { @@ -1106,24 +1202,30 @@ private void configTlsSettings(ClientBuilder clientBuilder, String serviceUrl, } } - public PulsarAdmin getClusterPulsarAdmin(String cluster) { + public PulsarAdmin getClusterPulsarAdmin(String cluster, Optional clusterDataOp) { PulsarAdmin admin = clusterAdmins.get(cluster); if (admin != null) { return admin; } return clusterAdmins.computeIfAbsent(cluster, key -> { try { - ClusterData data = pulsar.getPulsarResources().getClusterResources().getCluster(cluster) + ClusterData data = clusterDataOp .orElseThrow(() -> new MetadataStoreException.NotFoundException(cluster)); ServiceConfiguration conf = pulsar.getConfig(); boolean isTlsUrl = conf.isBrokerClientTlsEnabled() && isNotBlank(data.getServiceUrlTls()); String adminApiUrl = isTlsUrl ? data.getServiceUrlTls() : data.getServiceUrl(); - PulsarAdminBuilder builder = PulsarAdmin.builder().serviceHttpUrl(adminApiUrl) - .authentication( - conf.getBrokerClientAuthenticationPlugin(), - conf.getBrokerClientAuthenticationParameters()); + PulsarAdminBuilder builder = PulsarAdmin.builder().serviceHttpUrl(adminApiUrl); + + // Apply all arbitrary configuration. This must be called before setting any fields annotated as + // @Secret on the ClientConfigurationData object because of the way they are serialized. + // See https://github.com/apache/pulsar/issues/8509 for more information. 
+ builder.loadConf(PropertiesUtils.filterAndMapProperties(conf.getProperties(), "brokerClient_")); + + builder.authentication( + conf.getBrokerClientAuthenticationPlugin(), + conf.getBrokerClientAuthenticationParameters()); if (isTlsUrl) { builder.allowTlsInsecureConnection(conf.isTlsAllowInsecureConnection()); @@ -1177,7 +1279,7 @@ protected CompletableFuture> loadOrCreatePersistentTopic(final S final Semaphore topicLoadSemaphore = topicLoadRequestSemaphore.get(); if (topicLoadSemaphore.tryAcquire()) { - createPersistentTopic(topic, createIfMissing, topicFuture); + checkOwnershipAndCreatePersistentTopic(topic, createIfMissing, topicFuture); topicFuture.handle((persistentTopic, ex) -> { // release permit and process pending topic topicLoadSemaphore.release(); @@ -1198,17 +1300,36 @@ protected CompletableFuture> loadOrCreatePersistentTopic(final S return topicFuture; } - private void createPersistentTopic(final String topic, boolean createIfMissing, + private void checkOwnershipAndCreatePersistentTopic(final String topic, boolean createIfMissing, CompletableFuture> topicFuture) { + TopicName topicName = TopicName.get(topic); + pulsar.getNamespaceService().isServiceUnitActiveAsync(topicName) + .thenAccept(isActive -> { + if (isActive) { + createPersistentTopic(topic, createIfMissing, topicFuture); + } else { + // namespace is being unloaded + String msg = String.format("Namespace is being unloaded, cannot add topic %s", topic); + log.warn(msg); + pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture)); + topicFuture.completeExceptionally(new ServiceUnitNotReadyException(msg)); + } + }).exceptionally(ex -> { + topicFuture.completeExceptionally(ex); + return null; + }); + } + private void createPersistentTopic(final String topic, boolean createIfMissing, + CompletableFuture> topicFuture) { final long topicCreateTimeMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); TopicName topicName = TopicName.get(topic); - if 
(!pulsar.getNamespaceService().isServiceUnitActive(topicName)) { - // namespace is being unloaded - String msg = String.format("Namespace is being unloaded, cannot add topic %s", topic); + + if (isTransactionSystemTopic(topicName)) { + String msg = String.format("Can not create transaction system topic %s", topic); log.warn(msg); pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture)); - topicFuture.completeExceptionally(new ServiceUnitNotReadyException(msg)); + topicFuture.completeExceptionally(new NotAllowedException(msg)); return; } @@ -1262,9 +1383,9 @@ public void openLedgerComplete(ManagedLedger ledger, Object ctx) { if (topicFuture.isCompletedExceptionally()) { log.warn("{} future is already completed with failure {}, closing the topic", topic, FutureUtil.getException(topicFuture)); - persistentTopic.stopReplProducers().whenComplete((v, exception) -> { + persistentTopic.stopReplProducers().whenCompleteAsync((v, exception) -> { topics.remove(topic, topicFuture); - }); + }, executor()); } else { addTopicToStatsMaps(topicName, persistentTopic); topicFuture.complete(Optional.of(persistentTopic)); @@ -1274,14 +1395,14 @@ public void openLedgerComplete(ManagedLedger ledger, Object ctx) { "Replication or dedup check failed." 
+ " Removing topic from topics list {}, {}", topic, ex); - persistentTopic.stopReplProducers().whenComplete((v, exception) -> { + persistentTopic.stopReplProducers().whenCompleteAsync((v, exception) -> { topics.remove(topic, topicFuture); topicFuture.completeExceptionally(ex); - }); + }, executor()); return null; }); - } catch (NamingException | PulsarServerException e) { + } catch (PulsarServerException e) { log.warn("Failed to create topic {}-{}", topic, e.getMessage()); pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture)); topicFuture.completeExceptionally(e); @@ -1412,18 +1533,22 @@ public CompletableFuture getManagedLedgerConfig(TopicName t topicLevelOffloadPolicies, OffloadPoliciesImpl.oldPoliciesCompatible(nsLevelOffloadPolicies, policies.orElse(null)), getPulsar().getConfig().getProperties()); - if (topicLevelOffloadPolicies != null) { - try { - LedgerOffloader topicLevelLedgerOffLoader = - pulsar().createManagedLedgerOffloader(offloadPolicies); - managedLedgerConfig.setLedgerOffloader(topicLevelLedgerOffLoader); - } catch (PulsarServerException e) { - throw new RuntimeException(e); + if (NamespaceService.isSystemServiceNamespace(namespace.toString())) { + managedLedgerConfig.setLedgerOffloader(NullLedgerOffloader.INSTANCE); + } else { + if (topicLevelOffloadPolicies != null) { + try { + LedgerOffloader topicLevelLedgerOffLoader = + pulsar().createManagedLedgerOffloader(offloadPolicies); + managedLedgerConfig.setLedgerOffloader(topicLevelLedgerOffLoader); + } catch (PulsarServerException e) { + throw new RuntimeException(e); + } + } else { + //If the topic level policy is null, use the namespace level + managedLedgerConfig + .setLedgerOffloader(pulsar.getManagedLedgerOffloader(namespace, offloadPolicies)); } - } else { - //If the topic level policy is null, use the namespace level - managedLedgerConfig - .setLedgerOffloader(pulsar.getManagedLedgerOffloader(namespace, offloadPolicies)); } 
managedLedgerConfig.setDeletionAtBatchIndexLevelEnabled( @@ -1442,8 +1567,12 @@ private void addTopicToStatsMaps(TopicName topicName, Topic topic) { synchronized (multiLayerTopicsMap) { String serviceUnit = namespaceBundle.toString(); multiLayerTopicsMap // - .computeIfAbsent(topicName.getNamespace(), k -> new ConcurrentOpenHashMap<>()) // - .computeIfAbsent(serviceUnit, k -> new ConcurrentOpenHashMap<>()) // + .computeIfAbsent(topicName.getNamespace(), + k -> ConcurrentOpenHashMap.>newBuilder() + .build()) // + .computeIfAbsent(serviceUnit, + k -> ConcurrentOpenHashMap.newBuilder().build()) // .put(topicName.toString(), topic); } } @@ -1604,7 +1733,7 @@ public BacklogQuotaManager getBacklogQuotaManager() { return this.backlogQuotaManager; } - public synchronized void monitorBacklogQuota() { + public void monitorBacklogQuota() { forEachTopic(topic -> { if (topic instanceof PersistentTopic) { PersistentTopic persistentTopic = (PersistentTopic) topic; @@ -1706,7 +1835,7 @@ public void cleanUnloadedTopicFromCache(NamespaceBundle serviceUnit) { TopicName topicName = TopicName.get(topic); if (serviceUnit.includes(topicName) && getTopicReference(topic).isPresent()) { log.info("[{}][{}] Clean unloaded topic from cache.", serviceUnit.toString(), topic); - pulsar.getBrokerService().removeTopicFromCache(topicName.toString(), serviceUnit); + pulsar.getBrokerService().removeTopicFromCache(topicName.toString(), serviceUnit, null); } } } @@ -1715,15 +1844,56 @@ public AuthorizationService getAuthorizationService() { return authorizationService; } - public CompletableFuture removeTopicFromCache(String topic) { + public CompletableFuture removeTopicFromCache(String topicName) { + return removeTopicFutureFromCache(topicName, null); + } + + public CompletableFuture removeTopicFromCache(Topic topic) { + Optional>> createTopicFuture = findTopicFutureInCache(topic); + if (createTopicFuture.isPresent()){ + return CompletableFuture.completedFuture(null); + } + return 
removeTopicFutureFromCache(topic.getName(), createTopicFuture.get()); + } + + private Optional>> findTopicFutureInCache(Topic topic){ + if (topic == null){ + return Optional.empty(); + } + final CompletableFuture> createTopicFuture = topics.get(topic.getName()); + // If not exists in cache, do nothing. + if (createTopicFuture == null){ + return Optional.empty(); + } + // If the future in cache is not yet complete, the topic instance in the cache is not the same with the topic. + if (!createTopicFuture.isDone()){ + return Optional.empty(); + } + // If the future in cache has exception complete, + // the topic instance in the cache is not the same with the topic. + if (createTopicFuture.isCompletedExceptionally()){ + return Optional.empty(); + } + Optional optionalTopic = createTopicFuture.join(); + Topic topicInCache = optionalTopic.orElse(null); + if (topicInCache == null || topicInCache != topic){ + return Optional.empty(); + } else { + return Optional.of(createTopicFuture); + } + } + + private CompletableFuture removeTopicFutureFromCache(String topic, + CompletableFuture> createTopicFuture) { TopicName topicName = TopicName.get(topic); return pulsar.getNamespaceService().getBundleAsync(topicName) .thenAccept(namespaceBundle -> { - removeTopicFromCache(topic, namespaceBundle); + removeTopicFromCache(topic, namespaceBundle, createTopicFuture); }); } - public void removeTopicFromCache(String topic, NamespaceBundle namespaceBundle) { + private void removeTopicFromCache(String topic, NamespaceBundle namespaceBundle, + CompletableFuture> createTopicFuture) { String bundleName = namespaceBundle.toString(); String namespaceName = TopicName.get(topic).getNamespaceObject().toString(); @@ -1750,14 +1920,16 @@ public void removeTopicFromCache(String topic, NamespaceBundle namespaceBundle) } } } - topics.remove(topic); - try { - Compactor compactor = pulsar.getCompactor(false); - if (compactor != null) { - compactor.getStats().removeTopic(topic); - } - } catch 
(PulsarServerException ignore) { + if (createTopicFuture == null) { + topics.remove(topic); + } else { + topics.remove(topic, createTopicFuture); + } + + Compactor compactor = pulsar.getNullableCompactor(); + if (compactor != null) { + compactor.getStats().removeTopic(topic); } } @@ -2155,9 +2327,16 @@ private void validateConfigKey(String key) { */ private void updateDynamicServiceConfiguration() { Optional> configCache = Optional.empty(); + try { - configCache = - Optional.of(pulsar().getPulsarResources().getDynamicConfigResources().getDynamicConfiguration()); + configCache = + pulsar().getPulsarResources().getDynamicConfigResources().getDynamicConfiguration(); + + // create dynamic-config if not exist. + if (!configCache.isPresent()) { + pulsar().getPulsarResources().getDynamicConfigResources() + .setDynamicConfigurationWithCreate(n -> Maps.newHashMap()); + } } catch (Exception e) { log.warn("Failed to read dynamic broker configuration", e); } @@ -2214,7 +2393,8 @@ public static boolean validateDynamicConfiguration(String key, String value) { } private static ConcurrentOpenHashMap prepareDynamicConfigurationMap() { - ConcurrentOpenHashMap dynamicConfigurationMap = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap dynamicConfigurationMap = + ConcurrentOpenHashMap.newBuilder().build(); for (Field field : ServiceConfiguration.class.getDeclaredFields()) { if (field != null && field.isAnnotationPresent(FieldContext.class)) { field.setAccessible(true); @@ -2227,7 +2407,8 @@ private static ConcurrentOpenHashMap prepareDynamicConfigur } private ConcurrentOpenHashMap getRuntimeConfigurationMap() { - ConcurrentOpenHashMap runtimeConfigurationMap = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap runtimeConfigurationMap = + ConcurrentOpenHashMap.newBuilder().build(); for (Field field : ServiceConfiguration.class.getDeclaredFields()) { if (field != null && field.isAnnotationPresent(FieldContext.class)) { field.setAccessible(true); @@ -2259,7 +2440,7 @@ private 
void createPendingLoadTopic() { CompletableFuture> pendingFuture = pendingTopic.getRight(); final Semaphore topicLoadSemaphore = topicLoadRequestSemaphore.get(); final boolean acquiredPermit = topicLoadSemaphore.tryAcquire(); - createPersistentTopic(topic, true, pendingFuture); + checkOwnershipAndCreatePersistentTopic(topic, true, pendingFuture); pendingFuture.handle((persistentTopic, ex) -> { // release permit and process next pending topic if (acquiredPermit) { @@ -2303,7 +2484,20 @@ public CompletableFuture fetchPartitionedTopicMetadata pulsar.getBrokerService().createDefaultPartitionedTopicAsync(topicName) .thenAccept(md -> future.complete(md)) .exceptionally(ex -> { - future.completeExceptionally(ex); + if (ex.getCause() + instanceof MetadataStoreException.AlreadyExistsException) { + // The partitioned topic might be created concurrently + fetchPartitionedTopicMetadataAsync(topicName) + .whenComplete((metadata2, ex2) -> { + if (ex2 == null) { + future.complete(metadata2); + } else { + future.completeExceptionally(ex2); + } + }); + } else { + future.completeExceptionally(ex); + } return null; }); } else { @@ -2514,7 +2708,7 @@ public boolean isAllowAutoTopicCreation(final String topic) { public boolean isAllowAutoTopicCreation(final TopicName topicName) { //System topic can always be created automatically - if (pulsar.getConfiguration().isSystemTopicEnabled() && checkTopicIsEventsNames(topicName)) { + if (pulsar.getConfiguration().isSystemTopicEnabled() && isSystemTopic(topicName)) { return true; } AutoTopicCreationOverride autoTopicCreationOverride = getAutoTopicCreationOverride(topicName); @@ -2580,8 +2774,31 @@ private AutoSubscriptionCreationOverride getAutoSubscriptionCreationOverride(fin log.debug("No autoSubscriptionCreateOverride policy found for {}", topicName); return null; } - private boolean isSystemTopic(String topic) { - return SystemTopicClient.isSystemTopic(TopicName.get(topic)); + + public boolean isSystemTopic(String topic) { + return 
isSystemTopic(TopicName.get(topic)); + } + + public boolean isSystemTopic(TopicName topicName) { + if (topicName.getNamespaceObject().equals(NamespaceName.SYSTEM_NAMESPACE) + || topicName.getNamespaceObject().equals(pulsar.getHeartbeatNamespaceV1()) + || topicName.getNamespaceObject().equals(pulsar.getHeartbeatNamespaceV2())) { + return true; + } + + TopicName nonePartitionedTopicName = TopicName.get(topicName.getPartitionedTopicName()); + + // event topic + if (EventsTopicNames.checkTopicIsEventsNames(nonePartitionedTopicName)) { + return true; + } + + String localName = nonePartitionedTopicName.getLocalName(); + // transaction pending ack topic + if (StringUtils.endsWith(localName, MLPendingAckStore.PENDING_ACK_STORE_SUFFIX)) { + return true; + } + return false; } /** @@ -2602,11 +2819,20 @@ public Optional getTopicPolicies(TopicName topicName) { } public CompletableFuture deleteTopicPolicies(TopicName topicName) { - if (!pulsar().getConfig().isTopicLevelPoliciesEnabled()) { + final PulsarService pulsarService = pulsar(); + if (!pulsarService.getConfig().isTopicLevelPoliciesEnabled()) { return CompletableFuture.completedFuture(null); } - TopicName cloneTopicName = TopicName.get(topicName.getPartitionedTopicName()); - return pulsar.getTopicPoliciesService().deleteTopicPoliciesAsync(cloneTopicName); + return pulsarService.getPulsarResources().getNamespaceResources() + .getPoliciesAsync(topicName.getNamespaceObject()) + .thenComposeAsync(optPolicies -> { + if (optPolicies.isPresent() && optPolicies.get().deleted) { + // We can return the completed future directly if the namespace is already deleted. 
+ return CompletableFuture.completedFuture(null); + } + TopicName cloneTopicName = TopicName.get(topicName.getPartitionedTopicName()); + return pulsar.getTopicPoliciesService().deleteTopicPoliciesAsync(cloneTopicName); + }); } private CompletableFuture checkMaxTopicsPerNamespace(TopicName topicName, int numPartitions) { @@ -2616,13 +2842,13 @@ private CompletableFuture checkMaxTopicsPerNamespace(TopicName topicName, int maxTopicsPerNamespace = optPolicies.map(p -> p.max_topics_per_namespace) .orElse(pulsar.getConfig().getMaxTopicsPerNamespace()); - if (maxTopicsPerNamespace > 0 && !SystemTopicClient.isSystemTopic(topicName)) { + if (maxTopicsPerNamespace > 0 && !isSystemTopic(topicName)) { return pulsar().getPulsarResources().getTopicResources() .getExistingPartitions(topicName) .thenCompose(topics -> { // exclude created system topic long topicsCount = topics.stream() - .filter(t -> !SystemTopicClient.isSystemTopic(TopicName.get(t))) + .filter(t -> !isSystemTopic(TopicName.get(t))) .count(); if (topicsCount + numPartitions > maxTopicsPerNamespace) { log.error("Failed to create persistent topic {}, " diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConnectionController.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConnectionController.java index 51540e179be19..65c3a6c4f2a8b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConnectionController.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConnectionController.java @@ -36,7 +36,7 @@ public interface ConnectionController { * @param remoteAddress * @return */ - Sate increaseConnection(SocketAddress remoteAddress); + State increaseConnection(SocketAddress remoteAddress); /** * Decrease the number of connections counter. 
@@ -44,7 +44,7 @@ public interface ConnectionController { */ void decreaseConnection(SocketAddress remoteAddress); - enum Sate { + enum State { OK, REACH_MAX_CONNECTION_PER_IP, REACH_MAX_CONNECTION; } @@ -68,13 +68,13 @@ public DefaultConnectionController(ServiceConfiguration configuration) { } @Override - public Sate increaseConnection(SocketAddress remoteAddress) { + public State increaseConnection(SocketAddress remoteAddress) { if (!maxConnectionsLimitEnabled && !maxConnectionsLimitPerIpEnabled) { - return Sate.OK; + return State.OK; } if (!(remoteAddress instanceof InetSocketAddress) || !isLegalIpAddress(((InetSocketAddress) remoteAddress).getHostString())) { - return Sate.OK; + return State.OK; } lock.lock(); try { @@ -88,20 +88,20 @@ public Sate increaseConnection(SocketAddress remoteAddress) { if (maxConnectionsLimitEnabled && totalConnectionNum > maxConnections) { log.info("Reject connect request from {}, because reached the maximum number of connections {}", remoteAddress, totalConnectionNum); - return Sate.REACH_MAX_CONNECTION; + return State.REACH_MAX_CONNECTION; } if (maxConnectionsLimitPerIpEnabled && CONNECTIONS.get(ip).getValue() > maxConnectionPerIp) { log.info("Reject connect request from {}, because reached the maximum number " + "of connections per Ip {}", remoteAddress, CONNECTIONS.get(ip).getValue()); - return Sate.REACH_MAX_CONNECTION_PER_IP; + return State.REACH_MAX_CONNECTION_PER_IP; } } catch (Exception e) { log.error("increase connection failed", e); } finally { lock.unlock(); } - return Sate.OK; + return State.OK; } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelector.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelector.java index 7b7a8307baa45..ac255b59464d8 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelector.java +++ 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ConsistentHashingStickyKeyConsumerSelector.java @@ -91,7 +91,7 @@ public void removeConsumer(Consumer consumer) { if (v == null) { return null; } else { - v.removeIf(c -> c.consumerName().equals(consumer.consumerName())); + v.removeIf(c -> c.equals(consumer)); if (v.isEmpty()) { v = null; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java index 9f4214481588c..25dd3b908e519 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java @@ -21,9 +21,11 @@ import static com.google.common.base.Preconditions.checkArgument; import com.google.common.base.MoreObjects; import com.google.common.collect.Lists; +import com.google.common.util.concurrent.AtomicDouble; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; import java.util.ArrayList; +import java.util.BitSet; import java.util.Collections; import java.util.List; import java.util.Map; @@ -35,8 +37,6 @@ import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.PositionImpl; -import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap; -import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap.LongPair; import org.apache.commons.lang3.mutable.MutableInt; import org.apache.commons.lang3.tuple.MutablePair; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; @@ -56,6 +56,9 @@ import org.apache.pulsar.common.stats.Rate; import org.apache.pulsar.common.util.DateFormatter; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.collections.BitSetRecyclable; +import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap; +import 
org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap.LongPair; import org.apache.pulsar.transaction.common.exception.TransactionConflictException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -80,6 +83,7 @@ public class Consumer { private final Rate msgRedeliver; private final LongAdder msgOutCounter; private final LongAdder bytesOutCounter; + private final Rate messageAckRate; private long lastConsumedTimestamp; private long lastAckedTimestamp; @@ -114,18 +118,18 @@ public class Consumer { /** * It starts keep tracking the average messages per entry. - * The initial value is 1000, when new value comes, it will update with + * The initial value is 0, when new value comes, it will update with * avgMessagesPerEntry = avgMessagePerEntry * avgPercent + (1 - avgPercent) * new Value. */ - private static final AtomicIntegerFieldUpdater AVG_MESSAGES_PER_ENTRY = - AtomicIntegerFieldUpdater.newUpdater(Consumer.class, "avgMessagesPerEntry"); - private volatile int avgMessagesPerEntry = 1000; + private final AtomicDouble avgMessagesPerEntry = new AtomicDouble(0); + private static final long [] EMPTY_ACK_SET = new long[0]; private static final double avgPercent = 0.9; private boolean preciseDispatcherFlowControl; private PositionImpl readPositionWhenJoining; private final String clientAddress; // IP address only, no port number included private final MessageId startMessageId; + private final boolean isAcknowledgmentAtBatchIndexLevelEnabled; public Consumer(Subscription subscription, SubType subType, String topicName, long consumerId, int priorityLevel, String consumerName, @@ -150,6 +154,7 @@ public Consumer(Subscription subscription, SubType subType, String topicName, lo this.msgRedeliver = new Rate(); this.bytesOutCounter = new LongAdder(); this.msgOutCounter = new LongAdder(); + this.messageAckRate = new Rate(); this.appId = appId; // Ensure we start from compacted view @@ -159,7 +164,6 @@ public Consumer(Subscription subscription, SubType 
subType, String topicName, lo PERMITS_RECEIVED_WHILE_CONSUMER_BLOCKED_UPDATER.set(this, 0); MESSAGE_PERMITS_UPDATER.set(this, 0); UNACKED_MESSAGES_UPDATER.set(this, 0); - AVG_MESSAGES_PER_ENTRY.set(this, 1000); this.metadata = metadata != null ? metadata : Collections.emptyMap(); @@ -175,13 +179,20 @@ public Consumer(Subscription subscription, SubType subType, String topicName, lo stats.metadata = this.metadata; if (Subscription.isIndividualAckMode(subType)) { - this.pendingAcks = new ConcurrentLongLongPairHashMap(256, 1); + this.pendingAcks = ConcurrentLongLongPairHashMap.newBuilder() + .autoShrink(subscription.getTopic().getBrokerService() + .getPulsar().getConfiguration().isAutoShrinkForConsumerPendingAcksMap()) + .expectedItems(256) + .concurrencyLevel(1) + .build(); } else { // We don't need to keep track of pending acks if the subscription is not shared this.pendingAcks = null; } this.clientAddress = cnx.clientSourceAddress(); + this.isAcknowledgmentAtBatchIndexLevelEnabled = subscription.getTopic().getBrokerService() + .getPulsar().getConfiguration().isAcknowledgmentAtBatchIndexLevelEnabled(); } public SubType subType() { @@ -233,31 +244,42 @@ public Future sendMessages(final List entries, EntryBatchSizes batc writePromise.setSuccess(null); return writePromise; } - - // Note - // Must ensure that the message is written to the pendingAcks before sent is first , because this consumer - // is possible to disconnect at this time. - if (pendingAcks != null) { - for (int i = 0; i < entries.size(); i++) { - Entry entry = entries.get(i); - if (entry != null) { + int unackedMessages = totalMessages; + int totalEntries = 0; + + for (int i = 0; i < entries.size(); i++) { + Entry entry = entries.get(i); + if (entry != null) { + totalEntries++; + // Note + // Must ensure that the message is written to the pendingAcks before sent is first, + // because this consumer is possible to disconnect at this time. 
+ if (pendingAcks != null) { int batchSize = batchSizes.getBatchSize(i); int stickyKeyHash = getStickyKeyHash(entry); + long[] ackSet = getCursorAckSet(PositionImpl.get(entry.getLedgerId(), entry.getEntryId())); + if (ackSet != null) { + unackedMessages -= (batchSize - BitSet.valueOf(ackSet).cardinality()); + } pendingAcks.put(entry.getLedgerId(), entry.getEntryId(), batchSize, stickyKeyHash); - if (log.isDebugEnabled()){ + if (log.isDebugEnabled()) { log.debug("[{}-{}] Added {}:{} ledger entry with batchSize of {} to pendingAcks in" + " broker.service.Consumer for consumerId: {}", - topicName, subscription, entry.getLedgerId(), entry.getEntryId(), batchSize, consumerId); + topicName, subscription, entry.getLedgerId(), entry.getEntryId(), batchSize, + consumerId); } } } } // calculate avg message per entry - int tmpAvgMessagesPerEntry = AVG_MESSAGES_PER_ENTRY.get(this); - tmpAvgMessagesPerEntry = (int) Math.round(tmpAvgMessagesPerEntry * avgPercent - + (1 - avgPercent) * totalMessages / entries.size()); - AVG_MESSAGES_PER_ENTRY.set(this, tmpAvgMessagesPerEntry); + if (avgMessagesPerEntry.get() < 1) { //valid avgMessagesPerEntry should always >= 1 + // set init value. + avgMessagesPerEntry.set(1.0 * totalMessages / totalEntries); + } else { + avgMessagesPerEntry.set(avgMessagesPerEntry.get() * avgPercent + + (1 - avgPercent) * totalMessages / totalEntries); + } // reduce permit and increment unackedMsg count with total number of messages in batch-msgs int ackedCount = batchIndexesAcks == null ? 
0 : batchIndexesAcks.getTotalAckedIndexCount(); @@ -265,9 +287,9 @@ public Future sendMessages(final List entries, EntryBatchSizes batc if (log.isDebugEnabled()){ log.debug("[{}-{}] Added {} minus {} messages to MESSAGE_PERMITS_UPDATER in broker.service.Consumer" + " for consumerId: {}; avgMessagesPerEntry is {}", - topicName, subscription, ackedCount, totalMessages, consumerId, tmpAvgMessagesPerEntry); + topicName, subscription, ackedCount, totalMessages, consumerId, avgMessagesPerEntry.get()); } - incrementUnackedMessages(totalMessages); + incrementUnackedMessages(unackedMessages); msgOut.recordMultipleEvents(totalMessages, totalBytes); msgOutCounter.add(totalMessages); bytesOutCounter.add(totalBytes); @@ -331,6 +353,8 @@ public void doUnsubscribe(final long requestId) { } public CompletableFuture messageAcked(CommandAck ack) { + CompletableFuture future; + this.lastAckedTimestamp = System.currentTimeMillis(); Map properties = Collections.emptyMap(); if (ack.getPropertiesCount() > 0) { @@ -341,11 +365,13 @@ public CompletableFuture messageAcked(CommandAck ack) { if (ack.getAckType() == AckType.Cumulative) { if (ack.getMessageIdsCount() != 1) { log.warn("[{}] [{}] Received multi-message ack", subscription, consumerId); + return CompletableFuture.completedFuture(null); } if (Subscription.isIndividualAckMode(subType)) { log.warn("[{}] [{}] Received cumulative ack on shared subscription, ignoring", subscription, consumerId); + return CompletableFuture.completedFuture(null); } PositionImpl position = PositionImpl.earliest; if (ack.getMessageIdsCount() == 1) { @@ -362,35 +388,45 @@ public CompletableFuture messageAcked(CommandAck ack) { } if (ack.hasTxnidMostBits() && ack.hasTxnidLeastBits()) { List positionsAcked = Collections.singletonList(position); - return transactionCumulativeAcknowledge(ack.getTxnidMostBits(), - ack.getTxnidLeastBits(), positionsAcked); + future = transactionCumulativeAcknowledge(ack.getTxnidMostBits(), + ack.getTxnidLeastBits(), positionsAcked) 
+ .thenApply(v -> 1L); } else { List positionsAcked = Collections.singletonList(position); subscription.acknowledgeMessage(positionsAcked, AckType.Cumulative, properties); - return CompletableFuture.completedFuture(null); + future = CompletableFuture.completedFuture(1L); } } else { if (ack.hasTxnidLeastBits() && ack.hasTxnidMostBits()) { - return individualAckWithTransaction(ack); + future = individualAckWithTransaction(ack); } else { - return individualAckNormal(ack, properties); + future = individualAckNormal(ack, properties); } } + + return future.thenApply(v -> { + this.messageAckRate.recordEvent(v); + return null; + }); } //this method is for individual ack not carry the transaction - private CompletableFuture individualAckNormal(CommandAck ack, Map properties) { + private CompletableFuture individualAckNormal(CommandAck ack, Map properties) { List positionsAcked = new ArrayList<>(); - + long totalAckCount = 0; for (int i = 0; i < ack.getMessageIdsCount(); i++) { MessageIdData msgId = ack.getMessageIdAt(i); PositionImpl position; + long ackedCount = 0; + long batchSize = getBatchSize(msgId); + Consumer ackOwnerConsumer = getAckOwnerConsumer(msgId.getLedgerId(), msgId.getEntryId()); if (msgId.getAckSetsCount() > 0) { long[] ackSets = new long[msgId.getAckSetsCount()]; for (int j = 0; j < msgId.getAckSetsCount(); j++) { ackSets[j] = msgId.getAckSetAt(j); } position = PositionImpl.get(msgId.getLedgerId(), msgId.getEntryId(), ackSets); + ackedCount = getAckedCountForBatchIndexLevelEnabled(position, batchSize, ackSets, ackOwnerConsumer); if (isTransactionEnabled()) { //sync the batch position bit set point, in order to delete the position in pending acks if (Subscription.isIndividualAckMode(subType)) { @@ -400,16 +436,22 @@ private CompletableFuture individualAckNormal(CommandAck ack, Map completableFuture = new CompletableFuture<>(); - completableFuture.complete(null); + CompletableFuture completableFuture = new CompletableFuture<>(); + 
completableFuture.complete(totalAckCount); if (isTransactionEnabled() && Subscription.isIndividualAckMode(subType)) { completableFuture.whenComplete((v, e) -> positionsAcked.forEach(position -> { //check if the position can remove from the consumer pending acks. @@ -427,37 +469,45 @@ private CompletableFuture individualAckNormal(CommandAck ack, Map individualAckWithTransaction(CommandAck ack) { + private CompletableFuture individualAckWithTransaction(CommandAck ack) { // Individual ack List> positionsAcked = new ArrayList<>(); - if (!isTransactionEnabled()) { return FutureUtil.failedFuture( new BrokerServiceException.NotAllowedException("Server don't support transaction ack!")); } + LongAdder totalAckCount = new LongAdder(); for (int i = 0; i < ack.getMessageIdsCount(); i++) { MessageIdData msgId = ack.getMessageIdAt(i); PositionImpl position; + long ackedCount = 0; + long batchSize = getBatchSize(msgId); + Consumer ackOwnerConsumer = getAckOwnerConsumer(msgId.getLedgerId(), msgId.getEntryId()); if (msgId.getAckSetsCount() > 0) { - long[] acksSets = new long[msgId.getAckSetsCount()]; + long[] ackSets = new long[msgId.getAckSetsCount()]; for (int j = 0; j < msgId.getAckSetsCount(); j++) { - acksSets[j] = msgId.getAckSetAt(j); + ackSets[j] = msgId.getAckSetAt(j); } - position = PositionImpl.get(msgId.getLedgerId(), msgId.getEntryId(), acksSets); + position = PositionImpl.get(msgId.getLedgerId(), msgId.getEntryId(), ackSets); + ackedCount = getAckedCountForTransactionAck(batchSize, ackSets); } else { position = PositionImpl.get(msgId.getLedgerId(), msgId.getEntryId()); + ackedCount = batchSize; } - - if (msgId.hasBatchIndex()) { + if (msgId.hasBatchSize()) { positionsAcked.add(new MutablePair<>(position, msgId.getBatchSize())); } else { - positionsAcked.add(new MutablePair<>(position, 0)); + positionsAcked.add(new MutablePair<>(position, (int) batchSize)); } + addAndGetUnAckedMsgs(ackOwnerConsumer, -(int) ackedCount); + checkCanRemovePendingAcksAndHandle(position, 
msgId); checkAckValidationError(ack, position); + + totalAckCount.add(ackedCount); } CompletableFuture completableFuture = transactionIndividualAcknowledge(ack.getTxnidMostBits(), @@ -473,7 +523,77 @@ private CompletableFuture individualAckWithTransaction(CommandAck ack) { } })); } - return completableFuture; + return completableFuture.thenApply(v -> totalAckCount.sum()); + } + + private long getBatchSize(MessageIdData msgId) { + long batchSize = 1; + if (Subscription.isIndividualAckMode(subType)) { + LongPair longPair = pendingAcks.get(msgId.getLedgerId(), msgId.getEntryId()); + // Consumer may ack the msg that not belongs to it. + if (longPair == null) { + Consumer ackOwnerConsumer = getAckOwnerConsumer(msgId.getLedgerId(), msgId.getEntryId()); + longPair = ackOwnerConsumer.getPendingAcks().get(msgId.getLedgerId(), msgId.getEntryId()); + if (longPair != null) { + batchSize = longPair.first; + } + } else { + batchSize = longPair.first; + } + } + return batchSize; + } + + private long getAckedCountForMsgIdNoAckSets(long batchSize, PositionImpl position, Consumer consumer) { + if (isAcknowledgmentAtBatchIndexLevelEnabled && Subscription.isIndividualAckMode(subType)) { + long[] cursorAckSet = getCursorAckSet(position); + if (cursorAckSet != null) { + return getAckedCountForBatchIndexLevelEnabled(position, batchSize, EMPTY_ACK_SET, consumer); + } + } + return batchSize; + } + + private long getAckedCountForBatchIndexLevelEnabled(PositionImpl position, long batchSize, long[] ackSets, + Consumer consumer) { + long ackedCount = 0; + if (isAcknowledgmentAtBatchIndexLevelEnabled && Subscription.isIndividualAckMode(subType) + && consumer.getPendingAcks().get(position.getLedgerId(), position.getEntryId()) != null) { + long[] cursorAckSet = getCursorAckSet(position); + if (cursorAckSet != null) { + BitSetRecyclable cursorBitSet = BitSetRecyclable.create().resetWords(cursorAckSet); + int lastCardinality = cursorBitSet.cardinality(); + BitSetRecyclable givenBitSet = 
BitSetRecyclable.create().resetWords(ackSets); + cursorBitSet.and(givenBitSet); + givenBitSet.recycle(); + int currentCardinality = cursorBitSet.cardinality(); + ackedCount = lastCardinality - currentCardinality; + cursorBitSet.recycle(); + } else { + ackedCount = batchSize - BitSet.valueOf(ackSets).cardinality(); + } + } + return ackedCount; + } + + private long getAckedCountForTransactionAck(long batchSize, long[] ackSets) { + BitSetRecyclable bitset = BitSetRecyclable.create().resetWords(ackSets); + long ackedCount = batchSize - bitset.cardinality(); + bitset.recycle(); + return ackedCount; + } + + private long getUnAckedCountForBatchIndexLevelEnabled(PositionImpl position, long batchSize) { + long unAckedCount = batchSize; + if (isAcknowledgmentAtBatchIndexLevelEnabled) { + long[] cursorAckSet = getCursorAckSet(position); + if (cursorAckSet != null) { + BitSetRecyclable cursorBitSet = BitSetRecyclable.create().resetWords(cursorAckSet); + unAckedCount = cursorBitSet.cardinality(); + cursorBitSet.recycle(); + } + } + return unAckedCount; } private void checkAckValidationError(CommandAck ack, PositionImpl position) { @@ -489,6 +609,28 @@ private void checkCanRemovePendingAcksAndHandle(PositionImpl position, MessageId } } + private Consumer getAckOwnerConsumer(long ledgerId, long entryId) { + Consumer ackOwnerConsumer = this; + if (Subscription.isIndividualAckMode(subType)) { + if (!getPendingAcks().containsKey(ledgerId, entryId)) { + for (Consumer consumer : subscription.getConsumers()) { + if (consumer != this && consumer.getPendingAcks().containsKey(ledgerId, entryId)) { + ackOwnerConsumer = consumer; + break; + } + } + } + } + return ackOwnerConsumer; + } + + private long[] getCursorAckSet(PositionImpl position) { + if (!(subscription instanceof PersistentSubscription)) { + return null; + } + return (((PersistentSubscription) subscription).getCursor()).getDeletedBatchIndexesAsLongArray(position); + } + private boolean isTransactionEnabled() { return 
subscription instanceof PersistentSubscription && ((PersistentTopic) subscription.getTopic()) @@ -574,8 +716,11 @@ public int getAvailablePermits() { return MESSAGE_PERMITS_UPDATER.get(this); } + /** + * return 0 if there is no entry dispatched yet. + */ public int getAvgMessagesPerEntry() { - return AVG_MESSAGES_PER_ENTRY.get(this); + return (int) Math.round(avgMessagesPerEntry.get()); } public boolean isBlocked() { @@ -601,8 +746,11 @@ public void updateRates() { msgOut.calculateRate(); chunkedMessageRate.calculateRate(); msgRedeliver.calculateRate(); + messageAckRate.calculateRate(); + stats.msgRateOut = msgOut.getRate(); stats.msgThroughputOut = msgOut.getValueRate(); + stats.messageAckRate = messageAckRate.getValueRate(); stats.msgRateRedeliver = msgRedeliver.getRate(); stats.chunkedMessageRate = chunkedMessageRate.getRate(); } @@ -620,7 +768,7 @@ public void updateStats(ConsumerStatsImpl consumerStats) { } unackedMessages = consumerStats.unackedMessages; blockedConsumerOnUnackedMsgs = consumerStats.blockedConsumerOnUnackedMsgs; - AVG_MESSAGES_PER_ENTRY.set(this, consumerStats.avgMessagesPerEntry); + avgMessagesPerEntry.set(consumerStats.avgMessagesPerEntry); } public ConsumerStatsImpl getStats() { @@ -638,6 +786,14 @@ public ConsumerStatsImpl getStats() { return stats; } + public long getMsgOutCounter() { + return msgOutCounter.longValue(); + } + + public long getBytesOutCounter() { + return bytesOutCounter.longValue(); + } + public int getUnackedMessages() { return unackedMessages; } @@ -652,21 +808,26 @@ public String toString() { .add("consumerName", consumerName).add("address", this.cnx.clientAddress()).toString(); } - public void checkPermissions() { + public CompletableFuture checkPermissionsAsync() { TopicName topicName = TopicName.get(subscription.getTopicName()); if (cnx.getBrokerService().getAuthorizationService() != null) { - try { - if (cnx.getBrokerService().getAuthorizationService().canConsume(topicName, appId, - cnx.getAuthenticationData(), 
subscription.getName())) { - return; - } - } catch (Exception e) { - log.warn("[{}] Get unexpected error while autorizing [{}] {}", appId, subscription.getTopicName(), - e.getMessage(), e); - } - log.info("[{}] is not allowed to consume from topic [{}] anymore", appId, subscription.getTopicName()); - disconnect(); + return cnx.getBrokerService().getAuthorizationService().canConsumeAsync(topicName, appId, + cnx.getAuthenticationData(), subscription.getName()) + .handle((ok, e) -> { + if (e != null) { + log.warn("[{}] Get unexpected error while autorizing [{}] {}", appId, + subscription.getTopicName(), e.getMessage(), e); + } + + if (ok == null || !ok) { + log.info("[{}] is not allowed to consume from topic [{}] anymore", appId, + subscription.getTopicName()); + disconnect(); + } + return null; + }); } + return CompletableFuture.completedFuture(null); } @Override @@ -711,7 +872,6 @@ private void removePendingAcks(PositionImpl position) { ? ackOwnedConsumer.getPendingAcks().get(position.getLedgerId(), position.getEntryId()) : null; if (ackedPosition != null) { - int totalAckedMsgs = (int) ackedPosition.first; if (!ackOwnedConsumer.getPendingAcks().remove(position.getLedgerId(), position.getEntryId())) { // Message was already removed by the other consumer return; @@ -721,7 +881,7 @@ private void removePendingAcks(PositionImpl position) { } // unblock consumer-throttling when limit check is disabled or receives half of maxUnackedMessages => // consumer can start again consuming messages - int unAckedMsgs = addAndGetUnAckedMsgs(ackOwnedConsumer, -totalAckedMsgs); + int unAckedMsgs = UNACKED_MESSAGES_UPDATER.get(ackOwnedConsumer); if ((((unAckedMsgs <= maxUnackedMessages / 2) && ackOwnedConsumer.blockedConsumerOnUnackedMsgs) && ackOwnedConsumer.shouldBlockConsumerOnUnackMsgs()) || !shouldBlockConsumerOnUnackMsgs()) { @@ -751,7 +911,9 @@ public void redeliverUnacknowledgedMessages() { List pendingPositions = new ArrayList<>((int) pendingAcks.size()); MutableInt 
totalRedeliveryMessages = new MutableInt(0); pendingAcks.forEach((ledgerId, entryId, batchSize, stickyKeyHash) -> { - totalRedeliveryMessages.add((int) batchSize); + int unAckedCount = (int) getUnAckedCountForBatchIndexLevelEnabled(PositionImpl.get(ledgerId, entryId), + batchSize); + totalRedeliveryMessages.add(unAckedCount); pendingPositions.add(new PositionImpl(ledgerId, entryId)); }); @@ -775,9 +937,9 @@ public void redeliverUnacknowledgedMessages(List messageIds) { PositionImpl position = PositionImpl.get(msg.getLedgerId(), msg.getEntryId()); LongPair longPair = pendingAcks.get(position.getLedgerId(), position.getEntryId()); if (longPair != null) { - long batchSize = longPair.first; + int unAckedCount = (int) getUnAckedCountForBatchIndexLevelEnabled(position, longPair.first); pendingAcks.remove(position.getLedgerId(), position.getEntryId()); - totalRedeliveryMessages += batchSize; + totalRedeliveryMessages += unAckedCount; pendingPositions.add(position); } } @@ -811,8 +973,12 @@ public Subscription getSubscription() { } private int addAndGetUnAckedMsgs(Consumer consumer, int ackedMessages) { - subscription.addUnAckedMessages(ackedMessages); - return UNACKED_MESSAGES_UPDATER.addAndGet(consumer, ackedMessages); + int unackedMsgs = 0; + if (Subscription.isIndividualAckMode(subType)) { + subscription.addUnAckedMessages(ackedMessages); + unackedMsgs = UNACKED_MESSAGES_UPDATER.addAndGet(consumer, ackedMessages); + } + return unackedMsgs; } private void clearUnAckedMsgs() { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PrecisPublishLimiter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PrecisPublishLimiter.java index e61597e2d139f..67cc46d95fada 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PrecisPublishLimiter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PrecisPublishLimiter.java @@ -133,7 +133,7 @@ public boolean tryAcquire(int numbers, long bytes) { } @Override - 
public void close() throws Exception { + public void close() { rateLimitFunction.apply(); replaceLimiters(null); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Producer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Producer.java index d13e2f93632fb..8043695bb9911 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Producer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Producer.java @@ -126,9 +126,10 @@ public Producer(Topic topic, TransportCnx cnx, long producerId, String producerN stats.metadata = this.metadata; stats.accessMode = Commands.convertProducerAccessMode(accessMode); - this.isRemote = producerName - .startsWith(cnx.getBrokerService().pulsar().getConfiguration().getReplicatorPrefix()); - this.remoteCluster = isRemote ? producerName.split("\\.")[2].split(REPL_PRODUCER_NAME_DELIMITER)[0] : null; + + String replicatorPrefix = cnx.getBrokerService().pulsar().getConfiguration().getReplicatorPrefix() + "."; + this.isRemote = producerName.startsWith(replicatorPrefix); + this.remoteCluster = parseRemoteClusterName(producerName, isRemote, replicatorPrefix); this.isEncrypted = isEncrypted; this.schemaVersion = schemaVersion; @@ -138,22 +139,31 @@ public Producer(Topic topic, TransportCnx cnx, long producerId, String producerN this.clientAddress = cnx.clientSourceAddress(); } - @Override - public int hashCode() { - return Objects.hash(producerName); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof Producer) { - Producer other = (Producer) obj; - return Objects.equals(producerName, other.producerName) - && Objects.equals(topic, other.topic) - && producerId == other.producerId - && Objects.equals(cnx, other.cnx); + /** + * Producer name for replicator is in format. 
+ * "replicatorPrefix.localCluster" (old) + * "replicatorPrefix.localCluster-->remoteCluster" (new) + */ + private String parseRemoteClusterName(String producerName, boolean isRemote, String replicatorPrefix) { + if (isRemote) { + String clusterName = producerName.substring(replicatorPrefix.length()); + return clusterName.contains(REPL_PRODUCER_NAME_DELIMITER) + ? clusterName.split(REPL_PRODUCER_NAME_DELIMITER)[0] : clusterName; } + return null; + } - return false; + /** + * Method to determine if this producer can replace another producer. + * @param other - producer to compare to this one + * @return true if this producer is a subsequent instantiation of the same logical producer. Otherwise, false. + */ + public boolean isSuccessorTo(Producer other) { + return Objects.equals(producerName, other.producerName) + && Objects.equals(topic, other.topic) + && producerId == other.producerId + && Objects.equals(cnx, other.cnx) + && other.getEpoch() < epoch; } public void publishMessage(long producerId, long sequenceId, ByteBuf headersAndPayload, long batchSize, @@ -637,21 +647,26 @@ long getPendingPublishAcks() { return pendingPublishAcks; } - public void checkPermissions() { + public CompletableFuture checkPermissionsAsync() { TopicName topicName = TopicName.get(topic.getName()); if (cnx.getBrokerService().getAuthorizationService() != null) { - try { - if (cnx.getBrokerService().getAuthorizationService().canProduce(topicName, appId, - cnx.getAuthenticationData())) { - return; - } - } catch (Exception e) { - log.warn("[{}] Get unexpected error while autorizing [{}] {}", appId, topic.getName(), e.getMessage(), - e); - } - log.info("[{}] is not allowed to produce on topic [{}] anymore", appId, topic.getName()); - disconnect(); - } + return cnx.getBrokerService().getAuthorizationService() + .canProduceAsync(topicName, appId, cnx.getAuthenticationData()) + .handle((ok, ex) -> { + if (ex != null) { + log.warn("[{}] Get unexpected error while autorizing [{}] {}", appId, 
topic.getName(), + ex.getMessage(), ex); + } + + if (ok == null || !ok) { + log.info("[{}] is not allowed to produce on topic [{}] anymore", appId, topic.getName()); + disconnect(); + } + + return null; + }); + } + return CompletableFuture.completedFuture(null); } public void checkEncryption() { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PublishRateLimiter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PublishRateLimiter.java index 397887978b2b5..931f35cfa1bd4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PublishRateLimiter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PublishRateLimiter.java @@ -71,4 +71,9 @@ public interface PublishRateLimiter extends AutoCloseable { * @param bytes */ boolean tryAcquire(int numbers, long bytes); + + /** + * Close the limiter. + */ + void close(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PublishRateLimiterDisable.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PublishRateLimiterDisable.java index 81c4b82317f83..72c8132128e19 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PublishRateLimiterDisable.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PublishRateLimiterDisable.java @@ -63,7 +63,7 @@ public boolean tryAcquire(int numbers, long bytes) { } @Override - public void close() throws Exception { + public void close() { // No-op } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PublishRateLimiterImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PublishRateLimiterImpl.java index 0e1200edc31cb..f1646684b82cb 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PublishRateLimiterImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PublishRateLimiterImpl.java @@ -110,7 +110,7 @@ public boolean tryAcquire(int numbers, long bytes) { } 
@Override - public void close() throws Exception { + public void close() { // no-op } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PulsarChannelInitializer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PulsarChannelInitializer.java index 831e56f4f5d53..e75c518a50f02 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PulsarChannelInitializer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/PulsarChannelInitializer.java @@ -28,6 +28,7 @@ import io.netty.handler.flow.FlowControlHandler; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslHandler; +import io.netty.handler.ssl.SslProvider; import java.net.SocketAddress; import java.util.concurrent.TimeUnit; import lombok.Builder; @@ -92,10 +93,18 @@ public PulsarChannelInitializer(PulsarService pulsar, PulsarChannelOptions opts) serviceConfig.getTlsProtocols(), serviceConfig.getTlsCertRefreshCheckDurationSec()); } else { - sslCtxRefresher = new NettyServerSslContextBuilder(serviceConfig.isTlsAllowInsecureConnection(), - serviceConfig.getTlsTrustCertsFilePath(), serviceConfig.getTlsCertificateFilePath(), + SslProvider sslProvider = null; + if (serviceConfig.getTlsProvider() != null) { + sslProvider = SslProvider.valueOf(serviceConfig.getTlsProvider()); + } + sslCtxRefresher = new NettyServerSslContextBuilder( + sslProvider, + serviceConfig.isTlsAllowInsecureConnection(), + serviceConfig.getTlsTrustCertsFilePath(), + serviceConfig.getTlsCertificateFilePath(), serviceConfig.getTlsKeyFilePath(), - serviceConfig.getTlsCiphers(), serviceConfig.getTlsProtocols(), + serviceConfig.getTlsCiphers(), + serviceConfig.getTlsProtocols(), serviceConfig.isTlsRequireTrustedClientCertOnConnect(), serviceConfig.getTlsCertRefreshCheckDurationSec()); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java index 
5be21cc585f04..24cedf27277f9 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker.service; import static com.google.common.base.Preconditions.checkArgument; +import static org.apache.commons.lang3.StringUtils.EMPTY; import static org.apache.commons.lang3.StringUtils.isNotBlank; import static org.apache.pulsar.broker.admin.impl.PersistentTopicsBase.unsafeGetPartitionedTopicMetadataAsync; import static org.apache.pulsar.broker.lookup.TopicLookupBase.lookupTopicAsync; @@ -27,6 +28,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelOption; @@ -65,8 +67,8 @@ import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.TransactionMetadataStoreService; -import org.apache.pulsar.broker.authentication.AuthenticationDataCommand; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; +import org.apache.pulsar.broker.authentication.AuthenticationDataSubscription; import org.apache.pulsar.broker.authentication.AuthenticationProvider; import org.apache.pulsar.broker.authentication.AuthenticationState; import org.apache.pulsar.broker.intercept.BrokerInterceptor; @@ -230,7 +232,10 @@ public ServerCnx(PulsarService pulsar) { } public ServerCnx(PulsarService pulsar, String listenerName) { - super(pulsar.getBrokerService().getKeepAliveIntervalSeconds(), TimeUnit.SECONDS); + // pulsar.getBrokerService() can sometimes be null in unit tests when using mocks + // the null check is a workaround for #13620 + super(pulsar.getBrokerService() != null ? 
pulsar.getBrokerService().getKeepAliveIntervalSeconds() : 0, + TimeUnit.SECONDS); this.service = pulsar.getBrokerService(); this.schemaService = pulsar.getSchemaRegistryService(); this.listenerName = listenerName; @@ -238,8 +243,14 @@ public ServerCnx(PulsarService pulsar, String listenerName) { ServiceConfiguration conf = pulsar.getConfiguration(); // This maps are not heavily contended since most accesses are within the cnx thread - this.producers = new ConcurrentLongHashMap<>(8, 1); - this.consumers = new ConcurrentLongHashMap<>(8, 1); + this.producers = ConcurrentLongHashMap.>newBuilder() + .expectedItems(8) + .concurrencyLevel(1) + .build(); + this.consumers = ConcurrentLongHashMap.>newBuilder() + .expectedItems(8) + .concurrencyLevel(1) + .build(); this.replicatorPrefix = conf.getReplicatorPrefix(); this.maxNonPersistentPendingMessages = conf.getMaxConcurrentNonPersistentMessagePerConnection(); this.proxyRoles = conf.getProxyRoles(); @@ -261,13 +272,13 @@ public ServerCnx(PulsarService pulsar, String listenerName) { @Override public void channelActive(ChannelHandlerContext ctx) throws Exception { super.channelActive(ctx); - ConnectionController.Sate sate = connectionController.increaseConnection(remoteAddress); - if (!sate.equals(ConnectionController.Sate.OK)) { - ctx.channel().writeAndFlush(Commands.newError(-1, ServerError.NotAllowedError, - sate.equals(ConnectionController.Sate.REACH_MAX_CONNECTION) - ? "Reached the maximum number of connections" - : "Reached the maximum number of connections on address" + remoteAddress)); - ctx.channel().close(); + ConnectionController.State state = connectionController.increaseConnection(remoteAddress); + if (!state.equals(ConnectionController.State.OK)) { + ctx.writeAndFlush(Commands.newError(-1, ServerError.NotAllowedError, + state.equals(ConnectionController.State.REACH_MAX_CONNECTION) + ? 
"Reached the maximum number of connections" + : "Reached the maximum number of connections on address" + remoteAddress)) + .addListener(ChannelFutureListener.CLOSE); return; } log.info("New connection from {}", remoteAddress); @@ -292,6 +303,11 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { // Connection is gone, close the producers immediately producers.forEach((__, producerFuture) -> { + // prevent race conditions in completing producers + if (!producerFuture.isDone() + && producerFuture.completeExceptionally(new IllegalStateException("Connection closed."))) { + return; + } if (producerFuture.isDone() && !producerFuture.isCompletedExceptionally()) { Producer producer = producerFuture.getNow(null); producer.closeNow(true); @@ -299,17 +315,18 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { }); consumers.forEach((__, consumerFuture) -> { - Consumer consumer; - if (consumerFuture.isDone() && !consumerFuture.isCompletedExceptionally()) { - consumer = consumerFuture.getNow(null); - } else { + // prevent race conditions in completing consumers + if (!consumerFuture.isDone() + && consumerFuture.completeExceptionally(new IllegalStateException("Connection closed."))) { return; } - - try { - consumer.close(); - } catch (BrokerServiceException e) { - log.warn("Consumer {} was already closed: {}", consumer, e); + if (consumerFuture.isDone() && !consumerFuture.isCompletedExceptionally()) { + Consumer consumer = consumerFuture.getNow(null); + try { + consumer.close(); + } catch (BrokerServiceException e) { + log.warn("Consumer {} was already closed: {}", consumer, e); + } } }); this.service.getPulsarStats().recordConnectionClose(); @@ -359,19 +376,22 @@ private boolean invalidOriginalPrincipal(String originalPrincipal) { // // Incoming commands handling // //// - private CompletableFuture isTopicOperationAllowed(TopicName topicName, TopicOperation operation) { + private CompletableFuture 
isTopicOperationAllowed(TopicName topicName, TopicOperation operation, + AuthenticationDataSource authDataSource, + AuthenticationDataSource originalAuthDataSource) { if (!service.isAuthorizationEnabled()) { return CompletableFuture.completedFuture(true); } CompletableFuture isProxyAuthorizedFuture; if (originalPrincipal != null) { isProxyAuthorizedFuture = service.getAuthorizationService().allowTopicOperationAsync( - topicName, operation, originalPrincipal, getAuthenticationData()); + topicName, operation, originalPrincipal, + originalAuthDataSource != null ? originalAuthDataSource : authDataSource); } else { isProxyAuthorizedFuture = CompletableFuture.completedFuture(true); } CompletableFuture isAuthorizedFuture = service.getAuthorizationService().allowTopicOperationAsync( - topicName, operation, authRole, authenticationData); + topicName, operation, authRole, authDataSource); return isProxyAuthorizedFuture.thenCombine(isAuthorizedFuture, (isProxyAuthorized, isAuthorized) -> { if (!isProxyAuthorized) { log.warn("OriginalRole {} is not authorized to perform operation {} on topic {}", @@ -388,15 +408,15 @@ private CompletableFuture isTopicOperationAllowed(TopicName topicName, private CompletableFuture isTopicOperationAllowed(TopicName topicName, String subscriptionName, TopicOperation operation) { if (service.isAuthorizationEnabled()) { - if (authenticationData == null) { - authenticationData = new AuthenticationDataCommand("", subscriptionName); - } else { - authenticationData.setSubscription(subscriptionName); - } + AuthenticationDataSource authData = + new AuthenticationDataSubscription(getAuthenticationData(), subscriptionName); + AuthenticationDataSource authDataSource = + new AuthenticationDataSubscription(authenticationData, subscriptionName); + AuthenticationDataSource originalAuthDataSource = null; if (originalAuthData != null) { - originalAuthData.setSubscription(subscriptionName); + originalAuthDataSource = new 
AuthenticationDataSubscription(originalAuthData, subscriptionName); } - return isTopicOperationAllowed(topicName, operation); + return isTopicOperationAllowed(topicName, operation, authDataSource, originalAuthDataSource); } else { return CompletableFuture.completedFuture(true); } @@ -408,10 +428,12 @@ protected void handleLookup(CommandLookupTopic lookup) { final boolean authoritative = lookup.isAuthoritative(); // use the connection-specific listener name by default. - final String advertisedListenerName = lookup.hasAdvertisedListenerName() ? lookup.getAdvertisedListenerName() - : this.listenerName; + final String advertisedListenerName = + lookup.hasAdvertisedListenerName() && StringUtils.isNotBlank(lookup.getAdvertisedListenerName()) + ? lookup.getAdvertisedListenerName() : this.listenerName; if (log.isDebugEnabled()) { - log.debug("[{}] Received Lookup from {} for {}", lookup.getTopic(), remoteAddress, requestId); + log.debug("[{}] Received Lookup from {} for {} requesting listener {}", lookup.getTopic(), remoteAddress, + requestId, StringUtils.isNotBlank(advertisedListenerName) ? 
advertisedListenerName : "(none)"); } TopicName topicName = validateTopicName(lookup.getTopic(), requestId, lookup); @@ -429,7 +451,8 @@ protected void handleLookup(CommandLookupTopic lookup) { lookupSemaphore.release(); return; } - isTopicOperationAllowed(topicName, TopicOperation.LOOKUP).thenApply(isAuthorized -> { + isTopicOperationAllowed(topicName, TopicOperation.LOOKUP, authenticationData, originalAuthData).thenApply( + isAuthorized -> { if (isAuthorized) { lookupTopicAsync(getBrokerService().pulsar(), topicName, authoritative, getPrincipal(), getAuthenticationData(), @@ -492,7 +515,8 @@ protected void handlePartitionMetadataRequest(CommandPartitionedTopicMetadata pa lookupSemaphore.release(); return; } - isTopicOperationAllowed(topicName, TopicOperation.LOOKUP).thenApply(isAuthorized -> { + isTopicOperationAllowed(topicName, TopicOperation.LOOKUP, authenticationData, originalAuthData).thenApply( + isAuthorized -> { if (isAuthorized) { unsafeGetPartitionedTopicMetadataAsync(getBrokerService().pulsar(), topicName) .handle((metadata, ex) -> { @@ -592,6 +616,7 @@ ByteBuf createConsumerStatsResponse(Consumer consumer, long requestId) { .setConnectedSince(consumerStats.getConnectedSince()) .setMsgBacklog(subscription.getNumberOfEntriesInBacklog(false)) .setMsgRateExpired(subscription.getExpiredMessageRate()) + .setMessageAckRate(consumerStats.messageAckRate) .setType(subscription.getTypeString()); return Commands.serializeWithSize(cmd); @@ -804,8 +829,13 @@ protected void handleConnect(CommandConnect connect) { authenticationData = authState.getAuthDataSource(); if (log.isDebugEnabled()) { - log.debug("[{}] Authenticate role : {}", remoteAddress, - authState != null ? 
authState.getAuthRole() : null); + String role = ""; + if (authState != null && authState.isComplete()) { + role = authState.getAuthRole(); + } else { + role = "authentication incomplete or null"; + } + log.debug("[{}] Authenticate role : {}", remoteAddress, role); } state = doAuthentication(clientData, clientProtocolVersion, clientVersion); @@ -873,7 +903,8 @@ protected void handleAuthResponse(CommandAuthResponse authResponse) { try { AuthData clientData = AuthData.of(authResponse.getResponse().getAuthData()); - doAuthentication(clientData, authResponse.getProtocolVersion(), authResponse.getClientVersion()); + doAuthentication(clientData, authResponse.getProtocolVersion(), + authResponse.hasClientVersion() ? authResponse.getClientVersion() : EMPTY); } catch (AuthenticationException e) { service.getPulsarStats().recordConnectionCreateFail(); log.warn("[{}] Authentication failed: {} ", remoteAddress, e.getMessage()); @@ -959,32 +990,31 @@ protected void handleSubscribe(final CommandSubscribe subscribe) { consumerFuture); if (existingConsumerFuture != null) { - if (existingConsumerFuture.isDone() && !existingConsumerFuture.isCompletedExceptionally()) { - Consumer consumer = existingConsumerFuture.getNow(null); - log.info("[{}] Consumer with the same id is already created:" - + " consumerId={}, consumer={}", - remoteAddress, consumerId, consumer); - commandSender.sendSuccessResponse(requestId); - return null; - } else { + if (!existingConsumerFuture.isDone()){ // There was an early request to create a consumer with same consumerId. This can happen // when // client timeout is lower the broker timeouts. We need to wait until the previous // consumer // creation request either complete or fails. 
log.warn("[{}][{}][{}] Consumer with id is already present on the connection," - + " consumerId={}", remoteAddress, topicName, subscriptionName, consumerId); - ServerError error = null; - if (!existingConsumerFuture.isDone()) { - error = ServerError.ServiceNotReady; - } else { - error = getErrorCode(existingConsumerFuture); - consumers.remove(consumerId, existingConsumerFuture); - } - commandSender.sendErrorResponse(requestId, error, + + " consumerId={}", remoteAddress, topicName, subscriptionName, consumerId); + commandSender.sendErrorResponse(requestId, ServerError.ServiceNotReady, "Consumer is already present on the connection"); - return null; + } else if (existingConsumerFuture.isCompletedExceptionally()){ + ServerError error = getErrorCodeWithErrorLog(existingConsumerFuture, true, + String.format("Consumer subscribe failure. remoteAddress: %s, subscription: %s", + remoteAddress, subscriptionName)); + consumers.remove(consumerId, existingConsumerFuture); + commandSender.sendErrorResponse(requestId, error, + "Consumer that failed is already present on the connection"); + } else { + Consumer consumer = existingConsumerFuture.getNow(null); + log.info("[{}] Consumer with the same id is already created:" + + " consumerId={}, consumer={}", + remoteAddress, consumerId, consumer); + commandSender.sendSuccessResponse(requestId); } + return null; } boolean createTopicIfDoesNotExist = forceTopicCreation @@ -1002,7 +1032,8 @@ protected void handleSubscribe(final CommandSubscribe subscribe) { boolean rejectSubscriptionIfDoesNotExist = isDurable && !service.isAllowAutoSubscriptionCreation(topicName.toString()) - && !topic.getSubscriptions().containsKey(subscriptionName); + && !topic.getSubscriptions().containsKey(subscriptionName) + && topic.isPersistent(); if (rejectSubscriptionIfDoesNotExist) { return FutureUtil @@ -1140,7 +1171,7 @@ protected void handleProducer(final CommandProducer cmdProducer) { } CompletableFuture isAuthorizedFuture = isTopicOperationAllowed( - 
topicName, TopicOperation.PRODUCE + topicName, TopicOperation.PRODUCE, authenticationData, originalAuthData ); isAuthorizedFuture.thenApply(isAuthorized -> { if (isAuthorized) { @@ -1185,7 +1216,7 @@ protected void handleProducer(final CommandProducer cmdProducer) { log.info("[{}][{}] Creating producer. producerId={}", remoteAddress, topicName, producerId); - service.getOrCreateTopic(topicName.toString()).thenAccept((Topic topic) -> { + service.getOrCreateTopic(topicName.toString()).thenAcceptAsync((Topic topic) -> { // Before creating producer, check if backlog quota exceeded // on topic for size based limit and time based limit for (BacklogQuota.BacklogQuotaType backlogQuotaType : @@ -1244,18 +1275,20 @@ protected void handleProducer(final CommandProducer cmdProducer) { Throwable cause = exception.getCause(); log.error("producerId {}, requestId {} : TransactionBuffer recover failed", producerId, requestId, exception); + producers.remove(producerId, producerFuture); commandSender.sendErrorResponse(requestId, ServiceUnitNotReadyException.getClientErrorCode(cause), cause.getMessage()); return null; }); }); - }).exceptionally(exception -> { + }, getBrokerService().getPulsar().getExecutor()).exceptionally(exception -> { Throwable cause = exception.getCause(); if (cause instanceof NoSuchElementException) { cause = new TopicNotFoundException("Topic Not Found."); - } - if (!Exceptions.areExceptionsPresentInChain(cause, + log.info("[{}] Failed to load topic {}, producerId={}: Topic not found", + remoteAddress, topicName, producerId); + } else if (!Exceptions.areExceptionsPresentInChain(cause, ServiceUnitNotReadyException.class, ManagedLedgerException.class)) { // Do not print stack traces for expected exceptions log.error("[{}] Failed to create topic {}, producerId={}", @@ -1323,8 +1356,15 @@ private void buildProducerAndAddTopic(Topic topic, long producerId, String produ producers.remove(producerId, producerFuture); }).exceptionally(ex -> { - log.error("[{}] Failed to 
add producer to topic {}: producerId={}, {}", - remoteAddress, topicName, producerId, ex.getMessage()); + if (ex.getCause() instanceof BrokerServiceException.ProducerFencedException) { + if (log.isDebugEnabled()) { + log.debug("[{}] Failed to add producer to topic {}: producerId={}, {}", + remoteAddress, topicName, producerId, ex.getCause().getMessage()); + } + } else { + log.warn("[{}] Failed to add producer to topic {}: producerId={}, {}", + remoteAddress, topicName, producerId, ex.getCause().getMessage()); + } producer.closeNow(true); if (producerFuture.completeExceptionally(ex)) { @@ -1352,7 +1392,9 @@ protected void handleSend(CommandSend send, ByteBuf headersAndPayload) { CompletableFuture producerFuture = producers.get(send.getProducerId()); if (producerFuture == null || !producerFuture.isDone() || producerFuture.isCompletedExceptionally()) { - log.warn("[{}] Producer had already been closed: {}", remoteAddress, send.getProducerId()); + log.warn("[{}] Received message, but the producer is not ready : {}. Closing the connection.", + remoteAddress, send.getProducerId()); + close(); return; } @@ -1579,9 +1621,8 @@ protected void handleCloseProducer(CommandCloseProducer closeProducer) { CompletableFuture producerFuture = producers.get(producerId); if (producerFuture == null) { - log.warn("[{}] Producer was not registered on the connection. 
producerId={}", remoteAddress, producerId); - commandSender.sendErrorResponse(requestId, ServerError.UnknownError, - "Producer was not registered on the connection"); + log.info("[{}] Producer {} was not registered on the connection", remoteAddress, producerId); + ctx.writeAndFlush(Commands.newSuccess(requestId)); return; } @@ -1626,8 +1667,8 @@ protected void handleCloseConsumer(CommandCloseConsumer closeConsumer) { CompletableFuture consumerFuture = consumers.get(consumerId); if (consumerFuture == null) { - log.warn("[{}] Consumer was not registered on the connection: consumerId={}", remoteAddress, consumerId); - commandSender.sendErrorResponse(requestId, ServerError.MetadataError, "Consumer not found"); + log.info("[{}] Consumer was not registered on the connection: {}", consumerId, remoteAddress); + ctx.writeAndFlush(Commands.newSuccess(requestId)); return; } @@ -1708,7 +1749,10 @@ private void getLargestBatchIndexWhenPossible( ManagedLedgerImpl ml = (ManagedLedgerImpl) persistentTopic.getManagedLedger(); // If it's not pointing to a valid entry, respond messageId of the current position. 
- if (lastPosition.getEntryId() == -1) { + // If the compaction cursor reach the end of the topic, respond messageId from compacted ledger + Optional compactionHorizon = persistentTopic.getCompactedTopic().getCompactionHorizon(); + if (lastPosition.getEntryId() == -1 || (compactionHorizon.isPresent() + && lastPosition.compareTo((PositionImpl) compactionHorizon.get()) <= 0)) { handleLastMessageIdFromCompactedLedger(persistentTopic, requestId, partitionIndex, markDeletePosition); return; @@ -1840,6 +1884,9 @@ protected void handleGetTopicsOfNamespace(CommandGetTopicsOfNamespace commandGet if (isAuthorized) { getBrokerService().pulsar().getNamespaceService().getListOfTopics(namespaceName, mode) .thenAccept(topics -> { + topics = topics.stream() + .filter(topic -> !PulsarService.isTransactionSystemTopic(TopicName.get(topic))) + .collect(Collectors.toList()); if (log.isDebugEnabled()) { log.debug( "[{}] Received CommandGetTopicsOfNamespace for namespace [//{}] by {}, size:{}", @@ -1899,6 +1946,11 @@ remoteAddress, new String(commandGetSchema.getSchemaVersion()), long requestId = commandGetSchema.getRequestId(); SchemaVersion schemaVersion = SchemaVersion.Latest; if (commandGetSchema.hasSchemaVersion()) { + if (commandGetSchema.getSchemaVersion().length == 0) { + commandSender.sendGetSchemaErrorResponse(requestId, ServerError.IncompatibleSchema, + "Empty schema version"); + return; + } schemaVersion = schemaService.versionFromBytes(commandGetSchema.getSchemaVersion()); } @@ -2001,7 +2053,25 @@ private boolean checkTransactionEnableAndSenError(long requestId) { return true; } } + private Throwable handleTxnException(Throwable ex, String op, long requestId) { + Throwable cause = FutureUtil.unwrapCompletionException(ex); + if (cause instanceof CoordinatorException.CoordinatorNotFoundException) { + if (log.isDebugEnabled()) { + log.debug("The Coordinator was not found for the request {}", op); + } + return cause; + } + if (cause instanceof 
ManagedLedgerException.ManagedLedgerFencedException) { + if (log.isDebugEnabled()) { + log.debug("Throw a CoordinatorNotFoundException to client " + + "with the message got from a ManagedLedgerFencedException for the request {}", op); + } + return new CoordinatorException.CoordinatorNotFoundException(cause.getMessage()); + } + log.error("Send response error for {} request {}.", op, requestId, cause); + return cause; + } @Override protected void handleNewTxn(CommandNewTxn command) { final long requestId = command.getRequestId(); @@ -2026,9 +2096,7 @@ protected void handleNewTxn(CommandNewTxn command) { ctx.writeAndFlush(Commands.newTxnResponse(requestId, txnID.getLeastSigBits(), txnID.getMostSigBits())); } else { - if (log.isDebugEnabled()) { - log.debug("Send response error for new txn request {}", requestId, ex); - } + ex = handleTxnException(ex, BaseCommand.Type.NEW_TXN.name(), requestId); ctx.writeAndFlush(Commands.newTxnResponse(requestId, tcId.getId(), BrokerServiceException.getClientErrorCode(ex), ex.getMessage())); @@ -2064,19 +2132,11 @@ protected void handleAddPartitionToTxn(CommandAddPartitionToTxn command) { ctx.writeAndFlush(Commands.newAddPartitionToTxnResponse(requestId, txnID.getLeastSigBits(), txnID.getMostSigBits())); } else { - if (log.isDebugEnabled()) { - log.debug("Send response error for add published partition to txn request {}", requestId, - ex); - } + ex = handleTxnException(ex, BaseCommand.Type.ADD_PARTITION_TO_TXN.name(), requestId); - if (ex instanceof CoordinatorException.CoordinatorNotFoundException) { - ctx.writeAndFlush(Commands.newAddPartitionToTxnResponse(requestId, txnID.getMostSigBits(), - BrokerServiceException.getClientErrorCode(ex), ex.getMessage())); - } else { - ctx.writeAndFlush(Commands.newAddPartitionToTxnResponse(requestId, txnID.getMostSigBits(), - BrokerServiceException.getClientErrorCode(ex.getCause()), - ex.getCause().getMessage())); - } + ctx.writeAndFlush(Commands.newAddPartitionToTxnResponse(requestId, 
txnID.getMostSigBits(), + BrokerServiceException.getClientErrorCode(ex), + ex.getMessage())); transactionMetadataStoreService.handleOpFail(ex, tcId); } })); @@ -2103,16 +2163,10 @@ protected void handleEndTxn(CommandEndTxn command) { ctx.writeAndFlush(Commands.newEndTxnResponse(requestId, txnID.getLeastSigBits(), txnID.getMostSigBits())); } else { - log.error("Send response error for end txn request.", ex); - - if (ex instanceof CoordinatorException.CoordinatorNotFoundException) { - ctx.writeAndFlush(Commands.newEndTxnResponse(requestId, txnID.getMostSigBits(), - BrokerServiceException.getClientErrorCode(ex), ex.getMessage())); - } else { - ctx.writeAndFlush(Commands.newEndTxnResponse(requestId, txnID.getMostSigBits(), - BrokerServiceException.getClientErrorCode(ex.getCause()), - ex.getCause().getMessage())); - } + ex = handleTxnException(ex, BaseCommand.Type.END_TXN.name(), requestId); + ctx.writeAndFlush(Commands.newEndTxnResponse(requestId, txnID.getMostSigBits(), + BrokerServiceException.getClientErrorCode(ex), ex.getMessage())); + transactionMetadataStoreService.handleOpFail(ex, tcId); } }); @@ -2321,22 +2375,12 @@ protected void handleAddSubscriptionToTxn(CommandAddSubscriptionToTxn command) { } ctx.writeAndFlush(Commands.newAddSubscriptionToTxnResponse(requestId, txnID.getLeastSigBits(), txnID.getMostSigBits())); - log.info("handle add partition to txn finish."); } else { - if (log.isDebugEnabled()) { - log.debug("Send response error for add published partition to txn request {}", - requestId, ex); - } + ex = handleTxnException(ex, BaseCommand.Type.ADD_SUBSCRIPTION_TO_TXN.name(), requestId); - if (ex instanceof CoordinatorException.CoordinatorNotFoundException) { - ctx.writeAndFlush(Commands.newAddSubscriptionToTxnResponse(requestId, - txnID.getMostSigBits(), BrokerServiceException.getClientErrorCode(ex), - ex.getMessage())); - } else { - ctx.writeAndFlush(Commands.newAddSubscriptionToTxnResponse(requestId, - txnID.getMostSigBits(), 
BrokerServiceException.getClientErrorCode(ex.getCause()), - ex.getCause().getMessage())); - } + ctx.writeAndFlush(Commands.newAddSubscriptionToTxnResponse(requestId, + txnID.getMostSigBits(), BrokerServiceException.getClientErrorCode(ex), + ex.getMessage())); transactionMetadataStoreService.handleOpFail(ex, tcId); } })); @@ -2479,6 +2523,7 @@ public void startSendOperation(Producer producer, int msgSize, int numMessages) // When the quota of pending send requests is reached, stop reading from socket to cause backpressure on // client connection, possibly shared between multiple producers ctx.channel().config().setAutoRead(false); + recordRateLimitMetrics(producers); autoReadDisabledRateLimiting = isPublishRateExceeded; throttledConnections.inc(); } @@ -2500,6 +2545,17 @@ public void startSendOperation(Producer producer, int msgSize, int numMessages) } } + private void recordRateLimitMetrics(ConcurrentLongHashMap> producers) { + producers.forEach((key, producerFuture) -> { + if (producerFuture != null && producerFuture.isDone()) { + Producer p = producerFuture.getNow(null); + if (p != null && p.getTopic() != null) { + p.getTopic().increasePublishLimitedTimes(); + } + } + }); + } + @Override public void completedSendOperation(boolean isNonPersistentTopic, int msgSize) { if (pendingBytesPerThread.get().addAndGet(-msgSize) < resumeThresholdPendingBytesPerThread @@ -2544,6 +2600,7 @@ public void enableCnxAutoRead() { public void disableCnxAutoRead() { if (ctx != null && ctx.channel().config().isAutoRead()) { ctx.channel().config().setAutoRead(false); + recordRateLimitMetrics(producers); } } @@ -2563,6 +2620,11 @@ public void cancelPublishBufferLimiting() { } private ServerError getErrorCode(CompletableFuture future) { + return getErrorCodeWithErrorLog(future, false, null); + } + + private ServerError getErrorCodeWithErrorLog(CompletableFuture future, boolean logIfError, + String errorMessageIfLog) { ServerError error = ServerError.UnknownError; try { 
future.getNow(null); @@ -2570,6 +2632,11 @@ private ServerError getErrorCode(CompletableFuture future) { if (e.getCause() instanceof BrokerServiceException) { error = BrokerServiceException.getClientErrorCode(e.getCause()); } + if (logIfError){ + String finalErrorMessage = StringUtils.isNotBlank(errorMessageIfLog) + ? errorMessageIfLog : "Unknown Error"; + log.error(finalErrorMessage, e); + } } return error; } @@ -2783,4 +2850,29 @@ private static void logNamespaceNameAuthException(SocketAddress remoteAddress, S public boolean hasProducers() { return !producers.isEmpty(); } + + @VisibleForTesting + protected String getOriginalPrincipal() { + return originalPrincipal; + } + + @VisibleForTesting + protected AuthenticationDataSource getAuthData() { + return authenticationData; + } + + @VisibleForTesting + protected AuthenticationDataSource getOriginalAuthData() { + return originalAuthData; + } + + @VisibleForTesting + protected AuthenticationState getOriginalAuthState() { + return originalAuthState; + } + + @VisibleForTesting + protected void setAuthRole(String authRole) { + this.authRole = authRole; + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/StreamingStats.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/StreamingStats.java index 02dcb8233ffae..469c802b76a2c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/StreamingStats.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/StreamingStats.java @@ -65,6 +65,7 @@ public static void writeConsumerStats(StatsOutputStream statsStream, CommandSubs statsStream.writePair("msgThroughputOut", stats.msgThroughputOut); statsStream.writePair("msgRateRedeliver", stats.msgRateRedeliver); statsStream.writePair("avgMessagesPerEntry", stats.avgMessagesPerEntry); + statsStream.writePair("messageAckRate", stats.messageAckRate); if (Subscription.isIndividualAckMode(subType)) { statsStream.writePair("unackedMessages", stats.unackedMessages); 
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java index f5dc4b778aa09..38ec5d803c660 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesService.java @@ -22,9 +22,12 @@ import com.google.common.collect.Lists; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.Nonnull; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.namespace.NamespaceBundleOwnershipListener; @@ -35,6 +38,8 @@ import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.impl.Backoff; +import org.apache.pulsar.client.util.RetryUtil; import org.apache.pulsar.common.events.ActionType; import org.apache.pulsar.common.events.EventType; import org.apache.pulsar.common.events.PulsarEvent; @@ -43,6 +48,7 @@ import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.TopicPolicies; +import org.apache.pulsar.common.util.FutureUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -66,7 +72,8 @@ public class SystemTopicBasedTopicPoliciesService implements TopicPoliciesServic @VisibleForTesting final Map policyCacheInitMap = new ConcurrentHashMap<>(); - private final Map>> listeners = new ConcurrentHashMap<>(); + @VisibleForTesting + final Map>> listeners = 
new ConcurrentHashMap<>(); public SystemTopicBasedTopicPoliciesService(PulsarService pulsarService) { this.pulsarService = pulsarService; @@ -84,9 +91,13 @@ public CompletableFuture updateTopicPoliciesAsync(TopicName topicName, Top private CompletableFuture sendTopicPolicyEvent(TopicName topicName, ActionType actionType, TopicPolicies policies) { - createSystemTopicFactoryIfNeeded(); - CompletableFuture result = new CompletableFuture<>(); + try { + createSystemTopicFactoryIfNeeded(); + } catch (PulsarServerException e) { + result.completeExceptionally(e); + return result; + } SystemTopicClient systemTopicClient = namespaceEventsSystemTopicFactory.createTopicPoliciesSystemTopicClient(topicName.getNamespaceObject()); @@ -118,7 +129,7 @@ private CompletableFuture sendTopicPolicyEvent(TopicName topicName, Action } } }); - }) + }) ); } }); @@ -141,6 +152,17 @@ private PulsarEvent getPulsarEvent(TopicName topicName, ActionType actionType, T } private void notifyListener(Message msg) { + // delete policies + if (msg.getValue() == null) { + TopicName topicName = TopicName.get(TopicName.get(msg.getKey()).getPartitionedTopicName()); + if (listeners.get(topicName) != null) { + for (TopicPolicyListener listener : listeners.get(topicName)) { + listener.onUpdate(null); + } + } + return; + } + if (!EventType.TOPIC_POLICY.equals(msg.getValue().getEventType())) { return; } @@ -157,6 +179,10 @@ private void notifyListener(Message msg) { @Override public TopicPolicies getTopicPolicies(TopicName topicName) throws TopicPoliciesCacheNotInitException { + if (!policyCacheInitMap.containsKey(topicName.getNamespaceObject())) { + NamespaceName namespace = topicName.getNamespaceObject(); + prepareInitPoliciesCache(namespace, new CompletableFuture<>()); + } if (policyCacheInitMap.containsKey(topicName.getNamespaceObject()) && !policyCacheInitMap.get(topicName.getNamespaceObject())) { throw new TopicPoliciesCacheNotInitException(); @@ -164,11 +190,17 @@ public TopicPolicies 
getTopicPolicies(TopicName topicName) throws TopicPoliciesC return policiesCache.get(TopicName.get(topicName.getPartitionedTopicName())); } + @Override + public TopicPolicies getTopicPoliciesIfExists(TopicName topicName) { + return policiesCache.get(TopicName.get(topicName.getPartitionedTopicName())); + } + @Override public CompletableFuture getTopicPoliciesBypassCacheAsync(TopicName topicName) { CompletableFuture result = new CompletableFuture<>(); - createSystemTopicFactoryIfNeeded(); - if (namespaceEventsSystemTopicFactory == null) { + try { + createSystemTopicFactoryIfNeeded(); + } catch (PulsarServerException e) { result.complete(null); return result; } @@ -188,33 +220,51 @@ public CompletableFuture addOwnedNamespaceBundleAsync(NamespaceBundle name result.complete(null); return result; } - createSystemTopicFactoryIfNeeded(); synchronized (this) { if (readerCaches.get(namespace) != null) { ownedBundlesCountPerNamespace.get(namespace).incrementAndGet(); result.complete(null); } else { - SystemTopicClient systemTopicClient = namespaceEventsSystemTopicFactory - .createTopicPoliciesSystemTopicClient(namespace); - ownedBundlesCountPerNamespace.putIfAbsent(namespace, new AtomicInteger(1)); - policyCacheInitMap.put(namespace, false); - CompletableFuture> readerCompletableFuture = - systemTopicClient.newReaderAsync(); - readerCaches.put(namespace, readerCompletableFuture); - readerCompletableFuture.whenComplete((reader, ex) -> { - if (ex != null) { - log.error("[{}] Failed to create reader on __change_events topic", namespace, ex); - result.completeExceptionally(ex); - } else { - initPolicesCache(reader, result); - result.thenRun(() -> readMorePolicies(reader)); - } - }); + prepareInitPoliciesCache(namespace, result); } } return result; } + private void prepareInitPoliciesCache(@Nonnull NamespaceName namespace, CompletableFuture result) { + if (policyCacheInitMap.putIfAbsent(namespace, false) == null) { + CompletableFuture> readerCompletableFuture = + 
createSystemTopicClientWithRetry(namespace); + readerCaches.put(namespace, readerCompletableFuture); + ownedBundlesCountPerNamespace.putIfAbsent(namespace, new AtomicInteger(1)); + readerCompletableFuture.thenAccept(reader -> { + initPolicesCache(reader, result); + result.thenRun(() -> readMorePolicies(reader)); + }).exceptionally(ex -> { + log.error("[{}] Failed to create reader on __change_events topic", namespace, ex); + cleanCacheAndCloseReader(namespace, false); + result.completeExceptionally(ex); + return null; + }); + } + } + + protected CompletableFuture> createSystemTopicClientWithRetry( + NamespaceName namespace) { + CompletableFuture> result = new CompletableFuture<>(); + try { + createSystemTopicFactoryIfNeeded(); + } catch (PulsarServerException e) { + result.completeExceptionally(e); + return result; + } + SystemTopicClient systemTopicClient = namespaceEventsSystemTopicFactory + .createTopicPoliciesSystemTopicClient(namespace); + Backoff backoff = new Backoff(1, TimeUnit.SECONDS, 3, TimeUnit.SECONDS, 10, TimeUnit.SECONDS); + RetryUtil.retryAsynchronously(systemTopicClient::newReaderAsync, backoff, pulsarService.getExecutor(), result); + return result; + } + @Override public CompletableFuture removeOwnedNamespaceBundleAsync(NamespaceBundle namespaceBundle) { NamespaceName namespace = namespaceBundle.getNamespaceObject(); @@ -224,14 +274,7 @@ public CompletableFuture removeOwnedNamespaceBundleAsync(NamespaceBundle n } AtomicInteger bundlesCount = ownedBundlesCountPerNamespace.get(namespace); if (bundlesCount == null || bundlesCount.decrementAndGet() <= 0) { - CompletableFuture> readerCompletableFuture = - readerCaches.remove(namespace); - if (readerCompletableFuture != null) { - readerCompletableFuture.thenAccept(SystemTopicClient.Reader::closeAsync); - ownedBundlesCountPerNamespace.remove(namespace); - policyCacheInitMap.remove(namespace); - policiesCache.entrySet().removeIf(entry -> entry.getKey().getNamespaceObject().equals(namespace)); - } + 
cleanCacheAndCloseReader(namespace, true); } return CompletableFuture.completedFuture(null); } @@ -252,12 +295,11 @@ public void unLoad(NamespaceBundle bundle) { removeOwnedNamespaceBundleAsync(bundle); } - @Override - public boolean test(NamespaceBundle namespaceBundle) { - return true; - } - - }); + @Override + public boolean test(NamespaceBundle namespaceBundle) { + return true; + } + }); } private void initPolicesCache(SystemTopicClient.Reader reader, CompletableFuture future) { @@ -266,15 +308,17 @@ private void initPolicesCache(SystemTopicClient.Reader reader, Comp log.error("[{}] Failed to check the move events for the system topic", reader.getSystemTopic().getTopicName(), ex); future.completeExceptionally(ex); - readerCaches.remove(reader.getSystemTopic().getTopicName().getNamespaceObject()); + cleanCacheAndCloseReader(reader.getSystemTopic().getTopicName().getNamespaceObject(), false); + return; } if (hasMore) { reader.readNextAsync().whenComplete((msg, e) -> { if (e != null) { log.error("[{}] Failed to read event from the system topic.", - reader.getSystemTopic().getTopicName(), ex); + reader.getSystemTopic().getTopicName(), e); future.completeExceptionally(e); - readerCaches.remove(reader.getSystemTopic().getTopicName().getNamespaceObject()); + cleanCacheAndCloseReader(reader.getSystemTopic().getTopicName().getNamespaceObject(), false); + return; } refreshTopicPoliciesCache(msg); if (log.isDebugEnabled()) { @@ -289,7 +333,6 @@ private void initPolicesCache(SystemTopicClient.Reader reader, Comp } policyCacheInitMap.computeIfPresent( reader.getSystemTopic().getTopicName().getNamespaceObject(), (k, v) -> true); - // replay policy message policiesCache.forEach(((topicName, topicPolicies) -> { if (listeners.get(topicName) != null) { @@ -303,23 +346,43 @@ private void initPolicesCache(SystemTopicClient.Reader reader, Comp }); } + private void cleanCacheAndCloseReader(@Nonnull NamespaceName namespace, boolean cleanOwnedBundlesCount) { + CompletableFuture> 
readerFuture = readerCaches.remove(namespace); + policiesCache.entrySet().removeIf(entry -> Objects.equals(entry.getKey().getNamespaceObject(), namespace)); + if (cleanOwnedBundlesCount) { + ownedBundlesCountPerNamespace.remove(namespace); + } + if (readerFuture != null && !readerFuture.isCompletedExceptionally()) { + readerFuture.thenCompose(SystemTopicClient.Reader::closeAsync) + .exceptionally(ex -> { + log.warn("[{}] Close change_event reader fail.", namespace, ex); + return null; + }); + } + policyCacheInitMap.remove(namespace); + } + private void readMorePolicies(SystemTopicClient.Reader reader) { - reader.readNextAsync().whenComplete((msg, ex) -> { - if (ex == null) { - refreshTopicPoliciesCache(msg); - notifyListener(msg); - readMorePolicies(reader); - } else { - if (ex instanceof PulsarClientException.AlreadyClosedException) { - log.error("Read more topic policies exception, close the read now!", ex); - NamespaceName namespace = reader.getSystemTopic().getTopicName().getNamespaceObject(); - ownedBundlesCountPerNamespace.remove(namespace); - readerCaches.remove(namespace); - } else { - readMorePolicies(reader); - } - } - }); + reader.readNextAsync() + .thenAccept(msg -> { + refreshTopicPoliciesCache(msg); + notifyListener(msg); + }) + .whenComplete((__, ex) -> { + if (ex == null) { + readMorePolicies(reader); + } else { + Throwable cause = FutureUtil.unwrapCompletionException(ex); + if (cause instanceof PulsarClientException.AlreadyClosedException) { + log.error("Read more topic policies exception, close the read now!", ex); + cleanCacheAndCloseReader( + reader.getSystemTopic().getTopicName().getNamespaceObject(), false); + } else { + log.warn("Read more topic polices exception, read again.", ex); + readMorePolicies(reader); + } + } + }); } private void refreshTopicPoliciesCache(Message msg) { @@ -347,6 +410,12 @@ private void refreshTopicPoliciesCache(Message msg) { // However, due to compatibility, it is temporarily retained here // and can be deleted in 
the future. policiesCache.remove(topicName); + try { + createSystemTopicFactoryIfNeeded(); + } catch (PulsarServerException e) { + log.error("Failed to create system topic factory"); + break; + } SystemTopicClient systemTopicClient = namespaceEventsSystemTopicFactory .createTopicPoliciesSystemTopicClient(topicName.getNamespaceObject()); systemTopicClient.newWriterAsync().thenAccept(writer @@ -366,7 +435,7 @@ private void refreshTopicPoliciesCache(Message msg) { } } - private void createSystemTopicFactoryIfNeeded() { + private void createSystemTopicFactoryIfNeeded() throws PulsarServerException { if (namespaceEventsSystemTopicFactory == null) { synchronized (this) { if (namespaceEventsSystemTopicFactory == null) { @@ -375,6 +444,7 @@ private void createSystemTopicFactoryIfNeeded() { new NamespaceEventsSystemTopicFactory(pulsarService.getClient()); } catch (PulsarServerException e) { log.error("Create namespace event system topic factory error.", e); + throw e; } } } @@ -393,7 +463,8 @@ private void fetchTopicPoliciesAsyncAndCloseReader(SystemTopicClient.Reader listener) { - listeners.computeIfAbsent(topicName, k -> Lists.newCopyOnWriteArrayList()).add(listener); + listeners.compute(topicName, (k, topicListeners) -> { + if (topicListeners == null) { + topicListeners = Lists.newCopyOnWriteArrayList(); + } + topicListeners.add(listener); + return topicListeners; + }); } @Override public void unregisterListener(TopicName topicName, TopicPolicyListener listener) { - listeners.computeIfAbsent(topicName, k -> Lists.newCopyOnWriteArrayList()).remove(listener); + listeners.compute(topicName, (k, topicListeners) -> { + if (topicListeners != null){ + topicListeners.remove(listener); + if (topicListeners.isEmpty()) { + topicListeners = null; + } + } + return topicListeners; + }); } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Topic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Topic.java index 
4e5c698178112..14e8e3fe00418 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Topic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Topic.java @@ -129,6 +129,11 @@ default boolean isMarkerMessage() { */ void recordAddLatency(long latency, TimeUnit unit); + /** + * increase the publishing limited times. + */ + long increasePublishLimitedTimes(); + CompletableFuture subscribe(TransportCnx cnx, String subscriptionName, long consumerId, SubType subType, int priorityLevel, String consumerName, boolean isDurable, MessageId startMessageId, @@ -258,6 +263,8 @@ default boolean isSystemTopic() { return false; } + boolean isPersistent(); + /* ------ Transaction related ------ */ /** @@ -286,4 +293,9 @@ default boolean isSystemTopic() { */ CompletableFuture truncate(); + /** + * Get BrokerService. + * @return + */ + BrokerService getBrokerService(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicPoliciesService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicPoliciesService.java index 48d2f1e449673..0165dc13cd331 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicPoliciesService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/TopicPoliciesService.java @@ -61,6 +61,13 @@ public interface TopicPoliciesService { */ TopicPolicies getTopicPolicies(TopicName topicName) throws TopicPoliciesCacheNotInitException; + /** + * Get policies from current cache. + * @param topicName topic name + * @return the topic policies + */ + TopicPolicies getTopicPoliciesIfExists(TopicName topicName); + /** * When getting TopicPolicies, if the initialization has not been completed, * we will go back off and try again until time out. 
@@ -78,11 +85,13 @@ default CompletableFuture> getTopicPoliciesAsyncWithRetr .create() : backoff; try { RetryUtil.retryAsynchronously(() -> { + CompletableFuture> future = new CompletableFuture<>(); try { - return Optional.ofNullable(getTopicPolicies(topicName)); + future.complete(Optional.ofNullable(getTopicPolicies(topicName))); } catch (BrokerServiceException.TopicPoliciesCacheNotInitException exception) { - throw new RuntimeException(exception); + future.completeExceptionally(exception); } + return future; }, usedBackoff, scheduledExecutorService, response); } catch (Exception e) { response.completeExceptionally(e); @@ -145,6 +154,11 @@ public TopicPolicies getTopicPolicies(TopicName topicName) throws TopicPoliciesC return null; } + @Override + public TopicPolicies getTopicPoliciesIfExists(TopicName topicName) { + return null; + } + @Override public CompletableFuture getTopicPoliciesBypassCacheAsync(TopicName topicName) { return CompletableFuture.completedFuture(null); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentDispatcherSingleActiveConsumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentDispatcherSingleActiveConsumer.java index 6094ab71df2cc..5cdbff17b8171 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentDispatcherSingleActiveConsumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentDispatcherSingleActiveConsumer.java @@ -44,7 +44,7 @@ public final class NonPersistentDispatcherSingleActiveConsumer extends AbstractD public NonPersistentDispatcherSingleActiveConsumer(SubType subscriptionType, int partitionIndex, NonPersistentTopic topic, Subscription subscription) { super(subscriptionType, partitionIndex, topic.getName(), subscription, - topic.getBrokerService().pulsar().getConfiguration()); + topic.getBrokerService().pulsar().getConfiguration(), null); 
this.topic = topic; this.subscription = subscription; this.msgDrop = new Rate(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentReplicator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentReplicator.java index dd57fd91337cc..b863e9eb3c2cd 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentReplicator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentReplicator.java @@ -28,13 +28,13 @@ import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.service.AbstractReplicator; import org.apache.pulsar.broker.service.BrokerService; -import org.apache.pulsar.broker.service.BrokerServiceException.NamingException; import org.apache.pulsar.broker.service.Replicator; import org.apache.pulsar.broker.service.persistent.PersistentReplicator; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.impl.MessageImpl; import org.apache.pulsar.client.impl.ProducerImpl; +import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.SendCallback; import org.apache.pulsar.common.policies.data.stats.NonPersistentReplicatorStatsImpl; import org.apache.pulsar.common.stats.Rate; @@ -49,8 +49,9 @@ public class NonPersistentReplicator extends AbstractReplicator implements Repli private final NonPersistentReplicatorStatsImpl stats = new NonPersistentReplicatorStatsImpl(); public NonPersistentReplicator(NonPersistentTopic topic, String localCluster, String remoteCluster, - BrokerService brokerService) throws NamingException, PulsarServerException { - super(topic.getName(), topic.getReplicatorPrefix(), localCluster, remoteCluster, brokerService); + BrokerService brokerService, PulsarClientImpl replicationClient) throws PulsarServerException { + super(topic.getName(), 
topic.getReplicatorPrefix(), localCluster, remoteCluster, brokerService, + replicationClient); producerBuilder.blockIfQueueFull(false); @@ -228,7 +229,7 @@ public MessageImpl getNextMessage() { @Override public CompletableFuture getFuture() { - return null; + return CompletableFuture.completedFuture(null); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentSubscription.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentSubscription.java index 59fab2df3c683..b8a7df36af28e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentSubscription.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentSubscription.java @@ -24,11 +24,11 @@ import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; -import java.util.concurrent.atomic.LongAdder; import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.intercept.BrokerInterceptor; +import org.apache.pulsar.broker.service.AbstractSubscription; import org.apache.pulsar.broker.service.BrokerServiceException; import org.apache.pulsar.broker.service.BrokerServiceException.ServerMetadataException; import org.apache.pulsar.broker.service.BrokerServiceException.SubscriptionBusyException; @@ -48,7 +48,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class NonPersistentSubscription implements Subscription { +public class NonPersistentSubscription extends AbstractSubscription implements Subscription { private final NonPersistentTopic topic; private volatile NonPersistentDispatcher dispatcher; private final String topicName; @@ -66,9 +66,6 @@ public class NonPersistentSubscription implements Subscription { // Timestamp of when this subscription 
was last seen active private volatile long lastActive; - private final LongAdder bytesOutFromRemovedConsumers = new LongAdder(); - private final LongAdder msgOutFromRemovedConsumer = new LongAdder(); - // If isDurable is false(such as a Reader), remove subscription from topic when closing this subscription. private final boolean isDurable; @@ -453,6 +450,7 @@ public NonPersistentSubscriptionStatsImpl getStats() { ConsumerStatsImpl consumerStats = consumer.getStats(); subStats.consumers.add(consumerStats); subStats.msgRateOut += consumerStats.msgRateOut; + subStats.messageAckRate += consumerStats.messageAckRate; subStats.msgThroughputOut += consumerStats.msgThroughputOut; subStats.bytesOutCounter += consumerStats.bytesOutCounter; subStats.msgOutCounter += consumerStats.msgOutCounter; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java index c1687a35f50e3..35b900c297cba 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java @@ -25,6 +25,7 @@ import com.google.common.collect.Maps; import io.netty.buffer.ByteBuf; import io.netty.util.concurrent.FastThreadLocal; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; @@ -32,13 +33,13 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLongFieldUpdater; -import java.util.concurrent.atomic.LongAdder; import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.Position; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.ServiceConfiguration; +import 
org.apache.pulsar.broker.resources.NamespaceResources; +import org.apache.pulsar.broker.service.AbstractReplicator; import org.apache.pulsar.broker.service.AbstractTopic; import org.apache.pulsar.broker.service.BrokerService; import org.apache.pulsar.broker.service.BrokerServiceException; @@ -61,9 +62,11 @@ import org.apache.pulsar.broker.stats.NamespaceStats; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.api.proto.CommandSubscribe.InitialPosition; import org.apache.pulsar.common.api.proto.CommandSubscribe.SubType; import org.apache.pulsar.common.api.proto.KeySharedMeta; +import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.BacklogQuota; import org.apache.pulsar.common.policies.data.ManagedLedgerInternalStats.CursorStats; @@ -98,9 +101,6 @@ public class NonPersistentTopic extends AbstractTopic implements Topic { AtomicLongFieldUpdater.newUpdater(NonPersistentTopic.class, "entriesAddedCounter"); private volatile long entriesAddedCounter = 0; - private final LongAdder bytesOutFromRemovedSubscriptions = new LongAdder(); - private final LongAdder msgOutFromRemovedSubscriptions = new LongAdder(); - private static final FastThreadLocal threadLocalTopicStats = new FastThreadLocal() { @Override protected TopicStats initialValue() { @@ -134,8 +134,16 @@ public void reset() { public NonPersistentTopic(String topic, BrokerService brokerService) { super(topic, brokerService); - this.subscriptions = new ConcurrentOpenHashMap<>(16, 1); - this.replicators = new ConcurrentOpenHashMap<>(16, 1); + this.subscriptions = + ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); + this.replicators = + ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); this.isFenced = false; } 
@@ -400,7 +408,7 @@ private CompletableFuture delete(boolean failIfHasSubscriptions, boolean c // topic GC iterates over topics map and removing from the map with the same thread creates // deadlock. so, execute it in different thread brokerService.executor().execute(() -> { - brokerService.removeTopicFromCache(topic); + brokerService.removeTopicFromCache(NonPersistentTopic.this); log.info("[{}] Topic deleted", topic); deleteFuture.complete(null); }); @@ -450,11 +458,7 @@ public CompletableFuture close(boolean closeWithoutWaitingClientDisconnect replicators.forEach((cluster, replicator) -> futures.add(replicator.disconnect())); producers.values().forEach(producer -> futures.add(producer.disconnect())); if (topicPublishRateLimiter != null) { - try { - topicPublishRateLimiter.close(); - } catch (Exception e) { - log.warn("Error closing topicPublishRateLimiter for topic {}", topic, e); - } + topicPublishRateLimiter.close(); } subscriptions.forEach((s, sub) -> futures.add(sub.disconnect())); if (this.resourceGroupPublishLimiter != null) { @@ -470,7 +474,7 @@ public CompletableFuture close(boolean closeWithoutWaitingClientDisconnect // unload topic iterates over topics map and removing from the map with the same thread creates deadlock. 
// so, execute it in different thread brokerService.executor().execute(() -> { - brokerService.removeTopicFromCache(topic); + brokerService.removeTopicFromCache(NonPersistentTopic.this); closeFuture.complete(null); }); }).exceptionally(exception -> { @@ -495,6 +499,10 @@ public CompletableFuture checkReplication() { if (!name.isGlobal()) { return CompletableFuture.completedFuture(null); } + NamespaceName heartbeatNamespace = brokerService.pulsar().getHeartbeatNamespaceV2(); + if (name.getNamespaceObject().equals(heartbeatNamespace)) { + return CompletableFuture.completedFuture(null); + } if (log.isDebugEnabled()) { log.debug("[{}] Checking replication status", name); @@ -528,14 +536,7 @@ public CompletableFuture checkReplication() { } if (!replicators.containsKey(cluster)) { - if (!startReplicator(cluster)) { - // it happens when global topic is a partitioned topic and replicator can't start on - // original - // non partitioned-topic (topic without partition prefix) - return FutureUtil - .failedFuture(new NamingException( - topic + " failed to start replicator for " + cluster)); - } + futures.add(startReplicator(cluster)); } } @@ -553,29 +554,35 @@ public CompletableFuture checkReplication() { } - boolean startReplicator(String remoteCluster) { + CompletableFuture startReplicator(String remoteCluster) { log.info("[{}] Starting replicator to remote: {}", topic, remoteCluster); String localCluster = brokerService.pulsar().getConfiguration().getClusterName(); return addReplicationCluster(remoteCluster, NonPersistentTopic.this, localCluster); } - protected boolean addReplicationCluster(String remoteCluster, NonPersistentTopic nonPersistentTopic, + protected CompletableFuture addReplicationCluster(String remoteCluster, NonPersistentTopic nonPersistentTopic, String localCluster) { - AtomicBoolean isReplicatorStarted = new AtomicBoolean(true); - replicators.computeIfAbsent(remoteCluster, r -> { - try { - return new NonPersistentReplicator(NonPersistentTopic.this, 
localCluster, remoteCluster, brokerService); - } catch (NamingException | PulsarServerException e) { - isReplicatorStarted.set(false); - log.error("[{}] Replicator startup failed due to partitioned-topic {}", topic, remoteCluster); - } - return null; - }); - // clean up replicator if startup is failed - if (!isReplicatorStarted.get()) { - replicators.remove(remoteCluster); - } - return isReplicatorStarted.get(); + return AbstractReplicator.validatePartitionedTopicAsync(nonPersistentTopic.getName(), brokerService) + .thenCompose(__ -> brokerService.pulsar().getPulsarResources().getClusterResources() + .getClusterAsync(remoteCluster) + .thenApply(clusterData -> + brokerService.getReplicationClient(remoteCluster, clusterData))) + .thenAccept(replicationClient -> { + replicators.computeIfAbsent(remoteCluster, r -> { + try { + return new NonPersistentReplicator(NonPersistentTopic.this, localCluster, + remoteCluster, brokerService, (PulsarClientImpl) replicationClient); + } catch (PulsarServerException e) { + log.error("[{}] Replicator startup failed {}", topic, remoteCluster, e); + } + return null; + }); + + // clean up replicator if startup is failed + if (replicators.containsKey(remoteCluster) && replicators.get(remoteCluster) == null) { + replicators.remove(remoteCluster); + } + }); } CompletableFuture removeReplicator(String remoteCluster) { @@ -697,6 +704,7 @@ public void updateRates(NamespaceStats nsStats, NamespaceBundleStats bundleStats double subMsgRateOut = 0; double subMsgThroughputOut = 0; double subMsgRateRedeliver = 0; + double subMsgAckRate = 0; // Start subscription name & consumers try { @@ -713,6 +721,7 @@ public void updateRates(NamespaceStats nsStats, NamespaceBundleStats bundleStats subMsgRateOut += consumerStats.msgRateOut; subMsgThroughputOut += consumerStats.msgThroughputOut; subMsgRateRedeliver += consumerStats.msgRateRedeliver; + subMsgAckRate += consumerStats.messageAckRate; // Populate consumer specific stats here 
StreamingStats.writeConsumerStats(topicStatsStream, subscription.getType(), consumerStats); @@ -725,6 +734,7 @@ public void updateRates(NamespaceStats nsStats, NamespaceBundleStats bundleStats topicStatsStream.writePair("msgBacklog", subscription.getNumberOfEntriesInBacklog(false)); topicStatsStream.writePair("msgRateExpired", subscription.getExpiredMessageRate()); topicStatsStream.writePair("msgRateOut", subMsgRateOut); + topicStatsStream.writePair("messageAckRate", subMsgAckRate); topicStatsStream.writePair("msgThroughputOut", subMsgThroughputOut); topicStatsStream.writePair("msgRateRedeliver", subMsgRateRedeliver); topicStatsStream.writePair("type", subscription.getTypeString()); @@ -881,6 +891,7 @@ public void checkGC() { } stopReplProducers().thenCompose(v -> delete(true, false, true)) + .thenAccept(__ -> tryToDeletePartitionedMetadata()) .thenRun(() -> log.info("[{}] Topic deleted successfully due to inactivity", topic)) .exceptionally(e -> { Throwable throwable = e.getCause(); @@ -902,6 +913,23 @@ public void checkGC() { } } + private CompletableFuture tryToDeletePartitionedMetadata() { + if (TopicName.get(topic).isPartitioned() && !deletePartitionedTopicMetadataWhileInactive()) { + return CompletableFuture.completedFuture(null); + } + TopicName topicName = TopicName.get(TopicName.get(topic).getPartitionedTopicName()); + try { + NamespaceResources.PartitionedTopicResources partitionedTopicResources = brokerService.pulsar() + .getPulsarResources().getNamespaceResources().getPartitionedTopicResources(); + if (!partitionedTopicResources.partitionedTopicExists(topicName)) { + return CompletableFuture.completedFuture(null); + } + return partitionedTopicResources.deletePartitionedTopicAsync(topicName); + } catch (Exception e) { + return FutureUtil.failedFuture(e); + } + } + @Override public void checkInactiveSubscriptions() { TopicName name = TopicName.get(topic); @@ -954,21 +982,32 @@ public CompletableFuture onPoliciesUpdate(Policies data) { 
isAllowAutoUpdateSchema = data.is_allow_auto_update_schema; schemaValidationEnforced = data.schema_validation_enforced; - producers.values().forEach(producer -> { - producer.checkPermissions(); - producer.checkEncryption(); - }); - subscriptions.forEach((subName, sub) -> sub.getConsumers().forEach(Consumer::checkPermissions)); + List> producerCheckFutures = new ArrayList<>(producers.size()); + producers.values().forEach(producer -> producerCheckFutures.add( + producer.checkPermissionsAsync().thenRun(producer::checkEncryption))); - if (data.inactive_topic_policies != null) { - this.inactiveTopicPolicies = data.inactive_topic_policies; - } else { - ServiceConfiguration cfg = brokerService.getPulsar().getConfiguration(); - resetInactiveTopicPolicies(cfg.getBrokerDeleteInactiveTopicsMode() - , cfg.getBrokerDeleteInactiveTopicsMaxInactiveDurationSeconds(), - cfg.isBrokerDeleteInactiveTopicsEnabled()); - } - return checkReplicationAndRetryOnFailure(); + return FutureUtil.waitForAll(producerCheckFutures).thenCompose((__) -> { + List> consumerCheckFutures = new ArrayList<>(); + subscriptions.forEach((subName, sub) -> sub.getConsumers().forEach(consumer -> { + consumerCheckFutures.add(consumer.checkPermissionsAsync()); + })); + + return FutureUtil.waitForAll(consumerCheckFutures) + .thenCompose((___) -> { + if (data.inactive_topic_policies != null) { + this.inactiveTopicPolicies = data.inactive_topic_policies; + } else { + ServiceConfiguration cfg = brokerService.getPulsar().getConfiguration(); + resetInactiveTopicPolicies(cfg.getBrokerDeleteInactiveTopicsMode() + , cfg.getBrokerDeleteInactiveTopicsMaxInactiveDurationSeconds(), + cfg.isBrokerDeleteInactiveTopicsEnabled()); + } + return checkReplicationAndRetryOnFailure(); + }); + }).exceptionally(ex -> { + log.error("[{}] update namespace polices : {} error", this.getName(), data, ex); + throw FutureUtil.wrapToCompletionException(ex); + }); } /** @@ -1002,10 +1041,12 @@ public CompletableFuture unsubscribe(String 
subscriptionName) { // That creates deadlock. so, execute remove it in different thread. return CompletableFuture.runAsync(() -> { NonPersistentSubscription sub = subscriptions.remove(subscriptionName); - // preserve accumulative stats form removed subscription - SubscriptionStatsImpl stats = sub.getStats(); - bytesOutFromRemovedSubscriptions.add(stats.bytesOutCounter); - msgOutFromRemovedSubscriptions.add(stats.msgOutCounter); + if (sub != null) { + // preserve accumulative stats form removed subscription + SubscriptionStatsImpl stats = sub.getStats(); + bytesOutFromRemovedSubscriptions.add(stats.bytesOutCounter); + msgOutFromRemovedSubscriptions.add(stats.msgOutCounter); + } }, brokerService.executor()); } @@ -1057,4 +1098,9 @@ public CompletableFuture truncate() { protected boolean isTerminated() { return false; } + + @Override + public boolean isPersistent() { + return false; + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/CompactorSubscription.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/CompactorSubscription.java index f76dd752834fa..f7279968c51bb 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/CompactorSubscription.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/CompactorSubscription.java @@ -21,7 +21,6 @@ import static com.google.common.base.Preconditions.checkArgument; import java.util.List; import java.util.Map; -import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.mledger.AsyncCallbacks.MarkDeleteCallback; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedLedgerException; @@ -34,7 +33,7 @@ import org.slf4j.LoggerFactory; public class CompactorSubscription extends PersistentSubscription { - private CompactedTopic compactedTopic; + private final CompactedTopic compactedTopic; public CompactorSubscription(PersistentTopic topic, CompactedTopic 
compactedTopic, String subscriptionName, ManagedCursor cursor) { @@ -48,8 +47,12 @@ public CompactorSubscription(PersistentTopic topic, CompactedTopic compactedTopi Map properties = cursor.getProperties(); if (properties.containsKey(Compactor.COMPACTED_TOPIC_LEDGER_PROPERTY)) { long compactedLedgerId = properties.get(Compactor.COMPACTED_TOPIC_LEDGER_PROPERTY); - compactedTopic.newCompactedLedger(cursor.getMarkDeletedPosition(), - compactedLedgerId); + compactedTopic.newCompactedLedger(cursor.getMarkDeletedPosition(), compactedLedgerId) + .thenAccept(previousContext -> { + if (previousContext != null) { + compactedTopic.deleteCompactedLedger(previousContext.getLedger().getId()); + } + }); } } @@ -65,15 +68,25 @@ public void acknowledgeMessage(List positions, AckType ackType, Map future = new CompletableFuture<>(); - cursor.asyncMarkDelete(position, properties, new MarkDeleteCallback() { + + // The newCompactedLedger must be called at the first step because we need to ensure the reader can read + // complete data from compacted Ledger, otherwise, if the original ledger been deleted the reader cursor + // might move to a subsequent original ledger if `compactionHorizon` have not updated, this will lead to + // the reader skips compacted data at that time, after the `compactionHorizon` updated, the reader able + // to read the complete compacted data again. + // And we can only delete the previous ledger after the mark delete succeed, otherwise we will loss the + // compacted data if mark delete failed. 
+ compactedTopic.newCompactedLedger(position, compactedLedgerId).thenAccept(previousContext -> { + cursor.asyncMarkDelete(position, properties, new MarkDeleteCallback() { @Override public void markDeleteComplete(Object ctx) { if (log.isDebugEnabled()) { log.debug("[{}][{}] Mark deleted messages until position on compactor subscription {}", - topicName, subName, position); + topicName, subName, position); + } + if (previousContext != null) { + compactedTopic.deleteCompactedLedger(previousContext.getLedger().getId()); } - future.complete(null); } @Override @@ -81,19 +94,16 @@ public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { // TODO: cut consumer connection on markDeleteFailed if (log.isDebugEnabled()) { log.debug("[{}][{}] Failed to mark delete for position on compactor subscription {}", - topicName, subName, ctx, exception); + topicName, subName, ctx, exception); } } }, null); + }); if (topic.getManagedLedger().isTerminated() && cursor.getNumberOfEntriesInBacklog(false) == 0) { // Notify all consumer that the end of topic was reached dispatcher.getConsumers().forEach(Consumer::reachedEndOfTopic); } - - // Once properties have been persisted, we can notify the compacted topic to use - // the new ledger - future.thenAccept((v) -> compactedTopic.newCompactedLedger(position, compactedLedgerId)); } private static final Logger log = LoggerFactory.getLogger(CompactorSubscription.class); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java index 0df50cb623f7a..5b29a4f15c733 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java @@ -36,6 +36,7 @@ import org.apache.bookkeeper.mledger.ManagedCursor; import 
org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.bookkeeper.mledger.ManagedLedgerException; +import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.service.Topic.PublishContext; @@ -79,7 +80,8 @@ enum Status { Failed, } - enum MessageDupStatus { + @VisibleForTesting + public enum MessageDupStatus { // whether a message is a definitely a duplicate or not cannot be determined at this time Unknown, // message is definitely NOT a duplicate @@ -100,12 +102,20 @@ public MessageDupUnknownException() { // Map that contains the highest sequenceId that have been sent by each producers. The map will be updated before // the messages are persisted @VisibleForTesting - final ConcurrentOpenHashMap highestSequencedPushed = new ConcurrentOpenHashMap<>(16, 1); + final ConcurrentOpenHashMap highestSequencedPushed = + ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); // Map that contains the highest sequenceId that have been persistent by each producers. 
The map will be updated // after the messages are persisted @VisibleForTesting - final ConcurrentOpenHashMap highestSequencedPersisted = new ConcurrentOpenHashMap<>(16, 1); + final ConcurrentOpenHashMap highestSequencedPersisted = + ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); // Number of persisted entries after which to store a snapshot of the sequence ids map private final int snapshotInterval; @@ -138,6 +148,7 @@ public MessageDeduplication(PulsarService pulsar, PersistentTopic topic, Managed private CompletableFuture recoverSequenceIdsMap() { // Load the sequence ids from the snapshot in the cursor properties managedCursor.getProperties().forEach((k, v) -> { + producerRemoved(k); highestSequencedPushed.put(k, v); highestSequencedPersisted.put(k, v); }); @@ -168,6 +179,7 @@ public void readEntriesComplete(List entries, Object ctx) { long sequenceId = Math.max(md.getHighestSequenceId(), md.getSequenceId()); highestSequencedPushed.put(producerName, sequenceId); highestSequencedPersisted.put(producerName, sequenceId); + producerRemoved(producerName); entry.release(); } @@ -310,7 +322,7 @@ public boolean isEnabled() { * @return true if the message should be published or false if it was recognized as a duplicate */ public MessageDupStatus isDuplicate(PublishContext publishContext, ByteBuf headersAndPayload) { - if (!isEnabled()) { + if (!isEnabled() || publishContext.isMarkerMessage()) { return MessageDupStatus.NotDup; } @@ -363,7 +375,7 @@ public MessageDupStatus isDuplicate(PublishContext publishContext, ByteBuf heade * Call this method whenever a message is persisted to get the chance to trigger a snapshot. 
*/ public void recordMessagePersisted(PublishContext publishContext, PositionImpl position) { - if (!isEnabled()) { + if (!isEnabled() || publishContext.isMarkerMessage()) { return; } @@ -395,7 +407,7 @@ public void resetHighestSequenceIdPushed() { } } - private void takeSnapshot(PositionImpl position) { + private void takeSnapshot(Position position) { if (log.isDebugEnabled()) { log.debug("[{}] Taking snapshot of sequence ids map", topic.getName()); } @@ -406,7 +418,7 @@ private void takeSnapshot(PositionImpl position) { } }); - managedCursor.asyncMarkDelete(position, snapshot, new MarkDeleteCallback() { + getManagedCursor().asyncMarkDelete(position, snapshot, new MarkDeleteCallback() { @Override public void markDeleteComplete(Object ctx) { if (log.isDebugEnabled()) { @@ -464,19 +476,23 @@ public synchronized void purgeInactiveProducers() { .toMillis(pulsar.getConfiguration().getBrokerDeduplicationProducerInactivityTimeoutMinutes()); Iterator> mapIterator = inactiveProducers.entrySet().iterator(); + boolean hasInactive = false; while (mapIterator.hasNext()) { java.util.Map.Entry entry = mapIterator.next(); String producerName = entry.getKey(); long lastActiveTimestamp = entry.getValue(); - mapIterator.remove(); - if (lastActiveTimestamp < minimumActiveTimestamp) { log.info("[{}] Purging dedup information for producer {}", topic.getName(), producerName); + mapIterator.remove(); highestSequencedPushed.remove(producerName); highestSequencedPersisted.remove(producerName); + hasInactive = true; } } + if (hasInactive && isEnabled()) { + takeSnapshot(getManagedCursor().getMarkDeletedPosition()); + } } public long getLastPublishedSequenceId(String producerName) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryController.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryController.java index be143565c483f..7aaba9a4e1023 100644 --- 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryController.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryController.java @@ -22,43 +22,43 @@ import java.util.ArrayList; import java.util.List; import java.util.Set; -import java.util.TreeSet; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; import org.apache.bookkeeper.mledger.impl.PositionImpl; -import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap; -import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap.LongPair; -import org.apache.pulsar.common.util.collections.ConcurrentSortedLongPairSet; -import org.apache.pulsar.common.util.collections.LongPairSet; +import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap; +import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap.LongPair; +import org.apache.pulsar.utils.ConcurrentBitmapSortedLongPairSet; public class MessageRedeliveryController { - private final LongPairSet messagesToRedeliver; + private final ConcurrentBitmapSortedLongPairSet messagesToRedeliver; private final ConcurrentLongLongPairHashMap hashesToBeBlocked; public MessageRedeliveryController(boolean allowOutOfOrderDelivery) { - this.messagesToRedeliver = new ConcurrentSortedLongPairSet(128, 2); - this.hashesToBeBlocked = allowOutOfOrderDelivery ? null : new ConcurrentLongLongPairHashMap(128, 2); + this.messagesToRedeliver = new ConcurrentBitmapSortedLongPairSet(); + this.hashesToBeBlocked = allowOutOfOrderDelivery + ? 
null + : ConcurrentLongLongPairHashMap + .newBuilder().concurrencyLevel(2).expectedItems(128).autoShrink(true).build(); } - public boolean add(long ledgerId, long entryId) { - return messagesToRedeliver.add(ledgerId, entryId); + public void add(long ledgerId, long entryId) { + messagesToRedeliver.add(ledgerId, entryId); } - public boolean add(long ledgerId, long entryId, long stickyKeyHash) { + public void add(long ledgerId, long entryId, long stickyKeyHash) { if (hashesToBeBlocked != null) { hashesToBeBlocked.put(ledgerId, entryId, stickyKeyHash, 0); } - return messagesToRedeliver.add(ledgerId, entryId); + messagesToRedeliver.add(ledgerId, entryId); } - public boolean remove(long ledgerId, long entryId) { + public void remove(long ledgerId, long entryId) { if (hashesToBeBlocked != null) { hashesToBeBlocked.remove(ledgerId, entryId); } - return messagesToRedeliver.remove(ledgerId, entryId); + messagesToRedeliver.remove(ledgerId, entryId); } - public int removeAllUpTo(long markDeleteLedgerId, long markDeleteEntryId) { + public void removeAllUpTo(long markDeleteLedgerId, long markDeleteEntryId) { if (hashesToBeBlocked != null) { List keysToRemove = new ArrayList<>(); hashesToBeBlocked.forEach((ledgerId, entryId, stickyKeyHash, none) -> { @@ -70,10 +70,7 @@ public int removeAllUpTo(long markDeleteLedgerId, long markDeleteEntryId) { keysToRemove.forEach(longPair -> hashesToBeBlocked.remove(longPair.first, longPair.second)); keysToRemove.clear(); } - return messagesToRedeliver.removeIf((ledgerId, entryId) -> { - return ComparisonChain.start().compare(ledgerId, markDeleteLedgerId).compare(entryId, markDeleteEntryId) - .result() <= 0; - }); + messagesToRedeliver.removeUpTo(markDeleteLedgerId, markDeleteEntryId + 1); } public boolean isEmpty() { @@ -104,17 +101,6 @@ public boolean containsStickyKeyHashes(Set stickyKeyHashes) { } public Set getMessagesToReplayNow(int maxMessagesToRead) { - if (hashesToBeBlocked != null) { - // allowOutOfOrderDelivery is false - return 
messagesToRedeliver.items().stream() - .sorted((l1, l2) -> ComparisonChain.start().compare(l1.first, l2.first) - .compare(l1.second, l2.second).result()) - .limit(maxMessagesToRead).map(longPair -> new PositionImpl(longPair.first, longPair.second)) - .collect(Collectors.toCollection(TreeSet::new)); - } else { - // allowOutOfOrderDelivery is true - return messagesToRedeliver.items(maxMessagesToRead, - (ledgerId, entryId) -> new PositionImpl(ledgerId, entryId)); - } + return messagesToRedeliver.items(maxMessagesToRead, PositionImpl::new); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherMultipleConsumers.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherMultipleConsumers.java index 06a5c4cad518d..02fc80507636f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherMultipleConsumers.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherMultipleConsumers.java @@ -55,7 +55,7 @@ import org.apache.pulsar.broker.service.StickyKeyConsumerSelector; import org.apache.pulsar.broker.service.Subscription; import org.apache.pulsar.broker.service.persistent.DispatchRateLimiter.Type; -import org.apache.pulsar.broker.transaction.buffer.exceptions.TransactionNotSealedException; +import org.apache.pulsar.broker.transaction.exception.buffer.TransactionBufferException; import org.apache.pulsar.client.impl.Backoff; import org.apache.pulsar.common.api.proto.CommandSubscribe.SubType; import org.apache.pulsar.common.api.proto.MessageMetadata; @@ -83,6 +83,7 @@ public class PersistentDispatcherMultipleConsumers extends AbstractDispatcherMul protected volatile boolean havePendingRead = false; protected volatile boolean havePendingReplayRead = false; + protected volatile PositionImpl minReplayedPosition = null; protected boolean shouldRewindBeforeReadingOrReplaying = false; protected final 
String name; @@ -92,8 +93,7 @@ public class PersistentDispatcherMultipleConsumers extends AbstractDispatcherMul "totalAvailablePermits"); protected volatile int totalAvailablePermits = 0; protected volatile int readBatchSize; - protected final Backoff readFailureBackoff = new Backoff(15, TimeUnit.SECONDS, - 1, TimeUnit.MINUTES, 0, TimeUnit.MILLISECONDS); + protected final Backoff readFailureBackoff; private static final AtomicIntegerFieldUpdater TOTAL_UNACKED_MESSAGES_UPDATER = AtomicIntegerFieldUpdater.newUpdater(PersistentDispatcherMultipleConsumers.class, @@ -128,6 +128,10 @@ public PersistentDispatcherMultipleConsumers(PersistentTopic topic, ManagedCurso : RedeliveryTrackerDisabled.REDELIVERY_TRACKER_DISABLED; this.readBatchSize = serviceConfig.getDispatcherMaxReadBatchSize(); this.initializeDispatchRateLimiterIfNeeded(Optional.empty()); + this.readFailureBackoff = new Backoff( + topic.getBrokerService().pulsar().getConfiguration().getDispatcherReadFailureBackoffInitialTimeInMs(), + TimeUnit.MILLISECONDS, + 1, TimeUnit.MINUTES, 0, TimeUnit.MILLISECONDS); } @Override @@ -146,6 +150,7 @@ public synchronized void addConsumer(Consumer consumer) throws BrokerServiceExce shouldRewindBeforeReadingOrReplaying = false; } redeliveryMessages.clear(); + delayedDeliveryTracker.ifPresent(DelayedDeliveryTracker::clear); } if (isConsumersExceededOnSubscription()) { @@ -154,7 +159,10 @@ public synchronized void addConsumer(Consumer consumer) throws BrokerServiceExce } consumerList.add(consumer); - consumerList.sort(Comparator.comparingInt(Consumer::getPriorityLevel)); + if (consumerList.size() > 1 + && consumer.getPriorityLevel() < consumerList.get(consumerList.size() - 2).getPriorityLevel()) { + consumerList.sort(Comparator.comparingInt(Consumer::getPriorityLevel)); + } consumerSet.add(consumer); } @@ -222,6 +230,10 @@ public synchronized void consumerFlow(Consumer consumer, int additionalNumberOfM } public synchronized void readMoreEntries() { + if 
(shouldPauseDeliveryForDelayTracker()) { + return; + } + // totalAvailablePermits may be updated by other threads int firstAvailableConsumerPermits = getFirstAvailableConsumerPermits(); int currentTotalAvailablePermits = Math.max(totalAvailablePermits, firstAvailableConsumerPermits); @@ -244,6 +256,7 @@ public synchronized void readMoreEntries() { } havePendingReplayRead = true; + minReplayedPosition = messagesToReplayNow.stream().min(PositionImpl::compareTo).orElse(null); Set deletedMessages = topic.isDelayedDeliveryEnabled() ? asyncReplayEntriesInOrder(messagesToReplayNow) : asyncReplayEntries(messagesToReplayNow); // clear already acked positions from replay bucket @@ -267,6 +280,11 @@ public synchronized void readMoreEntries() { consumerList.size()); } havePendingRead = true; + Set toReplay = getMessagesToReplayNow(1); + minReplayedPosition = toReplay.stream().findFirst().orElse(null); + if (minReplayedPosition != null) { + redeliveryMessages.add(minReplayedPosition.getLedgerId(), minReplayedPosition.getEntryId()); + } cursor.asyncReadEntriesOrWait(messagesToRead, bytesToRead, this, ReadType.Normal, topic.getMaxReadPosition()); } else { @@ -287,8 +305,9 @@ protected Pair calculateToRead(int currentTotalAvailablePermits) Consumer c = getRandomConsumer(); // if turn on precise dispatcher flow control, adjust the record to read if (c != null && c.isPreciseDispatcherFlowControl()) { + int avgMessagesPerEntry = Math.max(1, c.getAvgMessagesPerEntry()); messagesToRead = Math.min( - (int) Math.ceil(currentTotalAvailablePermits * 1.0 / c.getAvgMessagesPerEntry()), + (int) Math.ceil(currentTotalAvailablePermits * 1.0 / avgMessagesPerEntry), readBatchSize); } @@ -517,6 +536,11 @@ protected void sendMessagesToConsumers(ReadType readType, List entries) { // round-robin dispatch batch size for this consumer int availablePermits = c.isWritable() ? 
c.getAvailablePermits() : 1; + if (c.getMaxUnackedMessages() > 0) { + // Avoid negative number + int remainUnAckedMessages = Math.max(c.getMaxUnackedMessages() - c.getUnackedMessages(), 0); + availablePermits = Math.min(availablePermits, remainUnAckedMessages); + } if (log.isDebugEnabled() && !c.isWritable()) { log.debug("[{}-{}] consumer is not writable. dispatching only 1 message to {}; " + "availablePermits are {}", topic.getName(), name, @@ -592,7 +616,9 @@ protected void sendMessagesToConsumers(ReadType readType, List entries) { entry.release(); }); } - readMoreEntries(); + // We should not call readMoreEntries() recursively in the same thread + // as there is a risk of StackOverflowError + topic.getBrokerService().executor().execute(this::readMoreEntries); } @Override @@ -607,7 +633,8 @@ public synchronized void readEntriesFailed(ManagedLedgerException exception, Obj // Notify the consumer only if all the messages were already acknowledged consumerList.forEach(Consumer::reachedEndOfTopic); } - } else if (exception.getCause() instanceof TransactionNotSealedException) { + } else if (exception.getCause() instanceof TransactionBufferException.TransactionNotSealedException + || exception.getCause() instanceof ManagedLedgerException.OffloadReadHandleClosedException) { waitTimeMillis = 1; if (log.isDebugEnabled()) { log.debug("[{}] Error reading transaction entries : {}, Read Type {} - Retrying to read in {} seconds", @@ -642,7 +669,10 @@ public synchronized void readEntriesFailed(ManagedLedgerException exception, Obj topic.getBrokerService().executor().schedule(() -> { synchronized (PersistentDispatcherMultipleConsumers.this) { - if (!havePendingRead) { + // If it's a replay read we need to retry even if there's already + // another scheduled read, otherwise we'd be stuck until + // more messages are published. 
+ if (!havePendingRead || readType == ReadType.Replay) { log.info("[{}] Retrying read operation", name); readMoreEntries(); } else { @@ -830,13 +860,20 @@ public boolean trackDelayedDelivery(long ledgerId, long entryId, MessageMetadata synchronized (this) { if (!delayedDeliveryTracker.isPresent()) { + if (!msgMetadata.hasDeliverAtTime()) { + // No need to initialize the tracker here + return false; + } + // Initialize the tracker the first time we need to use it delayedDeliveryTracker = Optional .of(topic.getBrokerService().getDelayedDeliveryTrackerFactory().newTracker(this)); } delayedDeliveryTracker.get().resetTickTime(topic.getDelayedDeliveryTickTimeMillis()); - return delayedDeliveryTracker.get().addMessage(ledgerId, entryId, msgMetadata.getDeliverAtTime()); + + long deliverAtTime = msgMetadata.hasDeliverAtTime() ? msgMetadata.getDeliverAtTime() : -1L; + return delayedDeliveryTracker.get().addMessage(ledgerId, entryId, deliverAtTime); } } @@ -845,12 +882,19 @@ protected synchronized Set getMessagesToReplayNow(int maxMessagesT return redeliveryMessages.getMessagesToReplayNow(maxMessagesToRead); } else if (delayedDeliveryTracker.isPresent() && delayedDeliveryTracker.get().hasMessageAvailable()) { delayedDeliveryTracker.get().resetTickTime(topic.getDelayedDeliveryTickTimeMillis()); - return delayedDeliveryTracker.get().getScheduledMessages(maxMessagesToRead); + Set messagesAvailableNow = + delayedDeliveryTracker.get().getScheduledMessages(maxMessagesToRead); + messagesAvailableNow.forEach(p -> redeliveryMessages.add(p.getLedgerId(), p.getEntryId())); + return messagesAvailableNow; } else { return Collections.emptySet(); } } + protected synchronized boolean shouldPauseDeliveryForDelayTracker() { + return delayedDeliveryTracker.isPresent() && delayedDeliveryTracker.get().shouldPauseAllDeliveries(); + } + @Override public synchronized long getNumberOfDelayedMessages() { return delayedDeliveryTracker.map(DelayedDeliveryTracker::getNumberOfDelayedMessages).orElse(0L); 
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java index fa7ac03a18221..727c4f09af9aa 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentDispatcherSingleActiveConsumer.java @@ -30,6 +30,7 @@ import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.ManagedLedgerException; +import org.apache.bookkeeper.mledger.ManagedLedgerException.ConcurrentWaitCallbackException; import org.apache.bookkeeper.mledger.ManagedLedgerException.NoMoreEntriesToReadException; import org.apache.bookkeeper.mledger.ManagedLedgerException.TooManyRequestsException; import org.apache.bookkeeper.mledger.impl.PositionImpl; @@ -45,7 +46,7 @@ import org.apache.pulsar.broker.service.SendMessageInfo; import org.apache.pulsar.broker.service.Subscription; import org.apache.pulsar.broker.service.persistent.DispatchRateLimiter.Type; -import org.apache.pulsar.broker.transaction.buffer.exceptions.TransactionNotSealedException; +import org.apache.pulsar.broker.transaction.exception.buffer.TransactionBufferException; import org.apache.pulsar.client.impl.Backoff; import org.apache.pulsar.common.api.proto.CommandSubscribe.SubType; import org.apache.pulsar.common.policies.data.DispatchRate; @@ -58,15 +59,13 @@ public class PersistentDispatcherSingleActiveConsumer extends AbstractDispatcher implements Dispatcher, ReadEntriesCallback { protected final PersistentTopic topic; - protected final ManagedCursor cursor; protected final String name; private Optional dispatchRateLimiter = Optional.empty(); protected volatile boolean havePendingRead = false; protected volatile int 
readBatchSize; - protected final Backoff readFailureBackoff = new Backoff(15, TimeUnit.SECONDS, - 1, TimeUnit.MINUTES, 0, TimeUnit.MILLISECONDS); + protected final Backoff readFailureBackoff; private volatile ScheduledFuture readOnActiveConsumerTask = null; private final RedeliveryTracker redeliveryTracker; @@ -74,12 +73,15 @@ public class PersistentDispatcherSingleActiveConsumer extends AbstractDispatcher public PersistentDispatcherSingleActiveConsumer(ManagedCursor cursor, SubType subscriptionType, int partitionIndex, PersistentTopic topic, Subscription subscription) { super(subscriptionType, partitionIndex, topic.getName(), subscription, - topic.getBrokerService().pulsar().getConfiguration()); + topic.getBrokerService().pulsar().getConfiguration(), cursor); this.topic = topic; this.name = topic.getName() + " / " + (cursor.getName() != null ? Codec.decode(cursor.getName()) : ""/* NonDurableCursor doesn't have name */); - this.cursor = cursor; this.readBatchSize = serviceConfig.getDispatcherMaxReadBatchSize(); + this.readFailureBackoff = new Backoff(serviceConfig.getDispatcherReadFailureBackoffInitialTimeInMs(), + TimeUnit.MILLISECONDS, serviceConfig.getDispatcherReadFailureBackoffMaxTimeInMs(), + TimeUnit.MILLISECONDS, serviceConfig.getDispatcherReadFailureBackoffMandatoryStopTimeInMs(), + TimeUnit.MILLISECONDS); this.redeliveryTracker = RedeliveryTrackerDisabled.REDELIVERY_TRACKER_DISABLED; this.initializeDispatchRateLimiterIfNeeded(Optional.empty()); } @@ -294,20 +296,13 @@ private synchronized void internalRedeliverUnacknowledgedMessages(Consumer consu name, consumer); return; } - - cancelPendingRead(); - - if (!havePendingRead) { - cursor.rewind(); - if (log.isDebugEnabled()) { - log.debug("[{}-{}] Cursor rewinded, redelivering unacknowledged messages. 
", name, consumer); - } - readMoreEntries(consumer); - } else { - log.info("[{}-{}] Ignoring reDeliverUnAcknowledgedMessages: cancelPendingRequest on cursor failed", name, - consumer); + cursor.cancelPendingReadRequest(); + havePendingRead = false; + cursor.rewind(); + if (log.isDebugEnabled()) { + log.debug("[{}-{}] Cursor rewinded, redelivering unacknowledged messages. ", name, consumer); } - + readMoreEntries(consumer); } @Override @@ -368,7 +363,7 @@ protected Pair calculateToRead(Consumer consumer) { long bytesToRead = serviceConfig.getDispatcherMaxReadSizeBytes(); // if turn of precise dispatcher flow control, adjust the records to read if (consumer.isPreciseDispatcherFlowControl()) { - int avgMessagesPerEntry = consumer.getAvgMessagesPerEntry(); + int avgMessagesPerEntry = Math.max(1, consumer.getAvgMessagesPerEntry()); messagesToRead = Math.min((int) Math.ceil(availablePermits * 1.0 / avgMessagesPerEntry), readBatchSize); } @@ -458,6 +453,12 @@ private synchronized void internalReadEntriesFailed(ManagedLedgerException excep havePendingRead = false; Consumer c = (Consumer) ctx; + if (exception instanceof ConcurrentWaitCallbackException) { + // At most one pending read request is allowed when there are no more entries, we should not trigger more + // read operations in this case and just wait the existing read operation completes. 
+ return; + } + long waitTimeMillis = readFailureBackoff.next(); if (exception instanceof NoMoreEntriesToReadException) { @@ -466,7 +467,8 @@ private synchronized void internalReadEntriesFailed(ManagedLedgerException excep // Notify the consumer only if all the messages were already acknowledged consumers.forEach(Consumer::reachedEndOfTopic); } - } else if (exception.getCause() instanceof TransactionNotSealedException) { + } else if (exception.getCause() instanceof TransactionBufferException.TransactionNotSealedException + || exception.getCause() instanceof ManagedLedgerException.OffloadReadHandleClosedException) { waitTimeMillis = 1; if (log.isDebugEnabled()) { log.debug("[{}] Error reading transaction entries : {}, - Retrying to read in {} seconds", name, diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentReplicator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentReplicator.java index aa53e150672ea..c10b70df6a87b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentReplicator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentReplicator.java @@ -45,7 +45,6 @@ import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.service.AbstractReplicator; import org.apache.pulsar.broker.service.BrokerService; -import org.apache.pulsar.broker.service.BrokerServiceException.NamingException; import org.apache.pulsar.broker.service.BrokerServiceException.TopicBusyException; import org.apache.pulsar.broker.service.Replicator; import org.apache.pulsar.broker.service.persistent.DispatchRateLimiter.Type; @@ -54,6 +53,7 @@ import org.apache.pulsar.client.impl.Backoff; import org.apache.pulsar.client.impl.MessageImpl; import org.apache.pulsar.client.impl.ProducerImpl; +import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.SendCallback; import 
org.apache.pulsar.common.api.proto.MarkerType; import org.apache.pulsar.common.policies.data.Policies; @@ -104,9 +104,13 @@ public class PersistentReplicator extends AbstractReplicator private final ReplicatorStatsImpl stats = new ReplicatorStatsImpl(); + private volatile boolean fetchSchemaInProgress = false; + public PersistentReplicator(PersistentTopic topic, ManagedCursor cursor, String localCluster, String remoteCluster, - BrokerService brokerService) throws NamingException, PulsarServerException { - super(topic.getName(), topic.getReplicatorPrefix(), localCluster, remoteCluster, brokerService); + BrokerService brokerService, PulsarClientImpl replicationClient) + throws PulsarServerException { + super(topic.getName(), topic.getReplicatorPrefix(), localCluster, remoteCluster, brokerService, + replicationClient); this.topic = topic; this.cursor = cursor; this.expiryMonitor = new PersistentMessageExpiryMonitor(topicName, @@ -217,6 +221,11 @@ private int getAvailablePermits() { } protected void readMoreEntries() { + if (fetchSchemaInProgress) { + log.info("[{}][{} -> {}] Skip the reading due to new detected schema", + topicName, localCluster, remoteCluster); + return; + } int availablePermits = getAvailablePermits(); if (availablePermits > 0) { @@ -287,8 +296,15 @@ public void readEntriesComplete(List entries, Object ctx) { // This flag is set to true when we skip atleast one local message, // in order to skip remaining local messages. boolean isLocalMessageSkippedOnce = false; + boolean skipRemainingMessages = false; for (int i = 0; i < entries.size(); i++) { Entry entry = entries.get(i); + // Skip the messages since the replicator need to fetch the schema info to replicate the schema to the + // remote cluster. Rewind the cursor first and continue the message read after fetched the schema. 
+ if (skipRemainingMessages) { + entry.release(); + continue; + } int length = entry.getLength(); ByteBuf headersAndPayload = entry.getDataBuffer(); MessageImpl msg; @@ -361,16 +377,34 @@ public void readEntriesComplete(List entries, Object ctx) { headersAndPayload.retain(); - getSchemaInfo(msg).thenAccept(schemaInfo -> { - msg.setSchemaInfoForReplicator(schemaInfo); + CompletableFuture schemaFuture = getSchemaInfo(msg); + if (!schemaFuture.isDone() || schemaFuture.isCompletedExceptionally()) { + entry.release(); + headersAndPayload.release(); + msg.recycle(); + // Mark the replicator is fetching the schema for now and rewind the cursor + // and trigger the next read after complete the schema fetching. + fetchSchemaInProgress = true; + skipRemainingMessages = true; + cursor.cancelPendingReadRequest(); + log.info("[{}][{} -> {}] Pause the data replication due to new detected schema", topicName, + localCluster, remoteCluster); + schemaFuture.whenComplete((__, e) -> { + if (e != null) { + log.warn("[{}][{} -> {}] Failed to get schema from local cluster, will try in the next loop", + topicName, localCluster, remoteCluster, e); + } + log.info("[{}][{} -> {}] Resume the data replication after the schema fetching done", topicName, + localCluster, remoteCluster); + cursor.rewind(); + fetchSchemaInProgress = false; + readMoreEntries(); + }); + } else { + msg.setSchemaInfoForReplicator(schemaFuture.get()); producer.sendAsync(msg, ProducerSendCallback.create(this, entry, msg)); - }).exceptionally(ex -> { - log.error("[{}][{} -> {}] Failed to get schema from local cluster", topicName, - localCluster, remoteCluster, ex); - return null; - }); - - atLeastOneMessageSentForReplication = true; + atLeastOneMessageSentForReplication = true; + } } } catch (Exception e) { log.error("[{}][{} -> {}] Unexpected exception: {}", topicName, localCluster, remoteCluster, e.getMessage(), @@ -501,7 +535,7 @@ public MessageImpl getNextMessage() { @Override public CompletableFuture getFuture() { - 
return null; + return CompletableFuture.completedFuture(null); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumers.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumers.java index f3bcbf2b2e9c9..844b72607dfa6 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumers.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumers.java @@ -169,6 +169,44 @@ protected void sendMessagesToConsumers(ReadType readType, List entries) { return; } + // A corner case that we have to retry a readMoreEntries in order to preserver order delivery. + // This may happen when consumer closed. See issue #12885 for details. + if (!allowOutOfOrderDelivery) { + Set messagesToReplayNow = this.getMessagesToReplayNow(1); + if (messagesToReplayNow != null && !messagesToReplayNow.isEmpty()) { + PositionImpl replayPosition = messagesToReplayNow.stream().findFirst().get(); + // We have received a message potentially from the delayed tracker and, since we're not using it + // right now, it needs to be added to the redelivery tracker or we won't attempt anymore to + // resend it (until we disconnect consumer). + redeliveryMessages.add(replayPosition.getLedgerId(), replayPosition.getEntryId()); + + if (this.minReplayedPosition != null) { + // If relayPosition is a new entry wither smaller position is inserted for redelivery during this + // async read, it is possible that this relayPosition should dispatch to consumer first. So in + // order to preserver order delivery, we need to discard this read result, and try to trigger a + // replay read, that containing "relayPosition", by calling readMoreEntries. 
+ if (replayPosition.compareTo(minReplayedPosition) < 0) { + if (log.isDebugEnabled()) { + log.debug("[{}] Position {} (<{}) is inserted for relay during current {} read, " + + "discard this read and retry with readMoreEntries.", + name, replayPosition, minReplayedPosition, readType); + } + if (readType == ReadType.Normal) { + entries.forEach(entry -> { + long stickyKeyHash = getStickyKeyHash(entry); + addMessageToReplay(entry.getLedgerId(), entry.getEntryId(), stickyKeyHash); + entry.release(); + }); + } else if (readType == ReadType.Replay) { + entries.forEach(Entry::release); + } + readMoreEntries(); + return; + } + } + } + } + nextStuckConsumers.clear(); final Map> groupedEntries = localGroupedEntries.get(); @@ -182,6 +220,7 @@ protected void sendMessagesToConsumers(ReadType readType, List entries) { groupedEntries.computeIfAbsent(c, k -> new ArrayList<>()).add(entry); consumerStickyKeyHashesMap.computeIfAbsent(c, k -> new HashSet<>()).add(stickyKeyHash); } else { + addMessageToReplay(entry.getLedgerId(), entry.getEntryId(), stickyKeyHash); entry.release(); } } @@ -196,7 +235,13 @@ protected void sendMessagesToConsumers(ReadType readType, List entries) { Consumer consumer = current.getKey(); List entriesWithSameKey = current.getValue(); int entriesWithSameKeyCount = entriesWithSameKey.size(); - final int availablePermits = consumer == null ? 0 : Math.max(consumer.getAvailablePermits(), 0); + int availablePermits = consumer == null ? 
0 : Math.max(consumer.getAvailablePermits(), 0); + if (consumer != null && consumer.getMaxUnackedMessages() > 0) { + int remainUnAckedMessages = + // Avoid negative number + Math.max(consumer.getMaxUnackedMessages() - consumer.getUnackedMessages(), 0); + availablePermits = Math.min(availablePermits, remainUnAckedMessages); + } int maxMessagesForC = Math.min(entriesWithSameKeyCount, availablePermits); int messagesForC = getRestrictedMaxEntriesForConsumer(consumer, entriesWithSameKey, maxMessagesForC, readType, consumerStickyKeyHashesMap.get(consumer)); @@ -283,7 +328,7 @@ protected void sendMessagesToConsumers(ReadType readType, List entries) { } // readMoreEntries should run regardless whether or not stuck is caused by // stuckConsumers for avoid stopping dispatch. - readMoreEntries(); + topic.getBrokerService().executor().execute(() -> readMoreEntries()); } else if (currentThreadKeyNumber == 0) { topic.getBrokerService().executor().schedule(() -> { synchronized (PersistentStickyKeyDispatcherMultipleConsumers.this) { @@ -359,13 +404,19 @@ private int getRestrictedMaxEntriesForConsumer(Consumer consumer, List en } @Override - public synchronized void markDeletePositionMoveForward() { - if (recentlyJoinedConsumers != null && !recentlyJoinedConsumers.isEmpty() - && removeConsumersFromRecentJoinedConsumers()) { - // After we process acks, we need to check whether the mark-delete position was advanced and we can finally - // read more messages. It's safe to call readMoreEntries() multiple times. 
- readMoreEntries(); - } + public void markDeletePositionMoveForward() { + // Execute the notification in different thread to avoid a mutex chain here + // from the delete operation that was completed + topic.getBrokerService().getTopicOrderedExecutor().execute(() -> { + synchronized (PersistentStickyKeyDispatcherMultipleConsumers.this) { + if (recentlyJoinedConsumers != null && !recentlyJoinedConsumers.isEmpty() + && removeConsumersFromRecentJoinedConsumers()) { + // After we process acks, we need to check whether the mark-delete position was advanced and we + // can finally read more messages. It's safe to call readMoreEntries() multiple times. + readMoreEntries(); + } + } + }); } private boolean removeConsumersFromRecentJoinedConsumers() { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java index acfd9ee192cb5..93ad2e80a642c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentSubscription.java @@ -29,7 +29,6 @@ import java.util.TreeMap; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; -import java.util.concurrent.atomic.LongAdder; import java.util.stream.Collectors; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.AsyncCallbacks.ClearBacklogCallback; @@ -50,6 +49,7 @@ import org.apache.commons.lang3.tuple.MutablePair; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.intercept.BrokerInterceptor; +import org.apache.pulsar.broker.service.AbstractSubscription; import org.apache.pulsar.broker.service.BrokerServiceException; import org.apache.pulsar.broker.service.BrokerServiceException.NotAllowedException; import 
org.apache.pulsar.broker.service.BrokerServiceException.ServerMetadataException; @@ -69,7 +69,6 @@ import org.apache.pulsar.common.api.proto.CommandSubscribe.SubType; import org.apache.pulsar.common.api.proto.KeySharedMeta; import org.apache.pulsar.common.api.proto.KeySharedMode; -import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.api.proto.ReplicatedSubscriptionsSnapshot; import org.apache.pulsar.common.api.proto.TxnAction; import org.apache.pulsar.common.naming.TopicName; @@ -77,13 +76,11 @@ import org.apache.pulsar.common.policies.data.TransactionPendingAckStats; import org.apache.pulsar.common.policies.data.stats.ConsumerStatsImpl; import org.apache.pulsar.common.policies.data.stats.SubscriptionStatsImpl; -import org.apache.pulsar.common.protocol.Commands; -import org.apache.pulsar.common.protocol.Markers; import org.apache.pulsar.common.util.FutureUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class PersistentSubscription implements Subscription { +public class PersistentSubscription extends AbstractSubscription implements Subscription { protected final PersistentTopic topic; protected final ManagedCursor cursor; protected volatile Dispatcher dispatcher; @@ -114,26 +111,12 @@ public class PersistentSubscription implements Subscription { private static final Map NON_REPLICATED_SUBSCRIPTION_CURSOR_PROPERTIES = Collections.emptyMap(); private volatile ReplicatedSubscriptionSnapshotCache replicatedSubscriptionSnapshotCache; - private volatile Position lastMarkDeleteForTransactionMarker; private final PendingAckHandle pendingAckHandle; - private final LongAdder bytesOutFromRemovedConsumers = new LongAdder(); - private final LongAdder msgOutFromRemovedConsumer = new LongAdder(); - - private DeleteTransactionMarkerState deleteTransactionMarkerState = DeleteTransactionMarkerState.None; - - private final Object waitObject = new Object(); - static { 
REPLICATED_SUBSCRIPTION_CURSOR_PROPERTIES.put(REPLICATED_SUBSCRIPTION_PROPERTY, 1L); } - public enum DeleteTransactionMarkerState { - Process, - Wait, - None - } - static Map getBaseCursorProperties(boolean isReplicated) { return isReplicated ? REPLICATED_SUBSCRIPTION_CURSOR_PROPERTIES : NON_REPLICATED_SUBSCRIPTION_CURSOR_PROPERTIES; } @@ -313,6 +296,7 @@ public synchronized void removeConsumer(Consumer consumer, boolean isResetCursor if (dispatcher != null && dispatcher.getConsumers().isEmpty()) { deactivateCursor(); + topic.getManagedLedger().removeWaitingCursor(cursor); if (!cursor.isDurable()) { // If cursor is not durable, we need to clean up the subscription as well @@ -419,8 +403,6 @@ public void acknowledgeMessage(List positions, AckType ackType, Map positions, AckType ackType, Map properties) { - - if (topic.getBrokerService().getPulsar().getConfig().isTransactionCoordinatorEnabled()) { - PositionImpl currentMarkDeletePosition = (PositionImpl) cursor.getMarkDeletedPosition(); - if ((lastMarkDeleteForTransactionMarker == null - || ((PositionImpl) lastMarkDeleteForTransactionMarker) - .compareTo(currentMarkDeletePosition) < 0)) { - if (currentMarkDeletePosition != null) { - ManagedLedgerImpl managedLedger = ((ManagedLedgerImpl) cursor.getManagedLedger()); - PositionImpl nextPosition = managedLedger.getNextValidPosition(currentMarkDeletePosition); - if (nextPosition != null - && nextPosition.compareTo((PositionImpl) managedLedger.getLastConfirmedEntry()) <= 0) { - synchronized (waitObject) { - if (deleteTransactionMarkerState == DeleteTransactionMarkerState.None) { - deleteTransactionMarkerState = DeleteTransactionMarkerState.Process; - managedLedger.asyncReadEntry(nextPosition, new ReadEntryCallback() { - @Override - public void readEntryComplete(Entry entry, Object ctx) { - try { - MessageMetadata messageMetadata = - Commands.parseMessageMetadata(entry.getDataBuffer()); - if (Markers.isTxnCommitMarker(messageMetadata) - || 
Markers.isTxnAbortMarker(messageMetadata)) { - synchronized (waitObject) { - deleteTransactionMarkerState = DeleteTransactionMarkerState.None; - } - lastMarkDeleteForTransactionMarker = currentMarkDeletePosition; - acknowledgeMessage(Collections.singletonList(nextPosition), - AckType.Individual, properties); - } else { - synchronized (waitObject) { - if (deleteTransactionMarkerState - == DeleteTransactionMarkerState.Wait) { - deleteTransactionMarkerState = - DeleteTransactionMarkerState.None; - deleteTransactionMarker(properties); - } else { - deleteTransactionMarkerState = - DeleteTransactionMarkerState.None; - } - } - } - } finally { - entry.release(); - } - } - - @Override - public void readEntryFailed(ManagedLedgerException exception, Object ctx) { - synchronized (waitObject) { - deleteTransactionMarkerState = - DeleteTransactionMarkerState.None; - } - log.error("Fail to read transaction marker! Position : {}", - currentMarkDeletePosition, exception); - } - }, null); - } else if (deleteTransactionMarkerState == DeleteTransactionMarkerState.Process) { - deleteTransactionMarkerState = DeleteTransactionMarkerState.Wait; - } - } - } - } - } - } - } - public CompletableFuture transactionIndividualAcknowledge( TxnID txnId, List> positions) { - return pendingAckHandle.individualAcknowledgeMessage(txnId, positions, false); + return pendingAckHandle.individualAcknowledgeMessage(txnId, positions); } public CompletableFuture transactionCumulativeAcknowledge(TxnID txnId, List positions) { - return pendingAckHandle.cumulativeAcknowledgeMessage(txnId, positions, false); + return pendingAckHandle.cumulativeAcknowledgeMessage(txnId, positions); } private final MarkDeleteCallback markDeleteCallback = new MarkDeleteCallback() { @@ -741,7 +656,15 @@ private void resetCursor(Position finalPosition, CompletableFuture future) topicName, subName); try { - cursor.asyncResetCursor(finalPosition, new AsyncCallbacks.ResetCursorCallback() { + boolean forceReset = false; + if 
(topic.getCompactedTopic() != null && topic.getCompactedTopic().getCompactionHorizon().isPresent()) { + PositionImpl horizon = (PositionImpl) topic.getCompactedTopic().getCompactionHorizon().get(); + PositionImpl resetTo = (PositionImpl) finalPosition; + if (horizon.compareTo(resetTo) >= 0) { + forceReset = true; + } + } + cursor.asyncResetCursor(finalPosition, forceReset, new AsyncCallbacks.ResetCursorCallback() { @Override public void resetComplete(Object ctx) { if (log.isDebugEnabled()) { @@ -1031,6 +954,7 @@ public SubscriptionStatsImpl getStats(Boolean getPreciseBacklog, boolean subscri subStats.bytesOutCounter += consumerStats.bytesOutCounter; subStats.msgOutCounter += consumerStats.msgOutCounter; subStats.msgRateRedeliver += consumerStats.msgRateRedeliver; + subStats.messageAckRate += consumerStats.messageAckRate; subStats.chunkedMessageRate += consumerStats.chunkedMessageRate; subStats.unackedMessages += consumerStats.unackedMessages; subStats.lastConsumedTimestamp = @@ -1173,14 +1097,14 @@ public void processReplicatedSubscriptionSnapshot(ReplicatedSubscriptionsSnapsho public CompletableFuture endTxn(long txnidMostBits, long txnidLeastBits, int txnAction, long lowWaterMark) { TxnID txnID = new TxnID(txnidMostBits, txnidLeastBits); if (TxnAction.COMMIT.getValue() == txnAction) { - return pendingAckHandle.commitTxn(txnID, Collections.emptyMap(), lowWaterMark, false); + return pendingAckHandle.commitTxn(txnID, Collections.emptyMap(), lowWaterMark); } else if (TxnAction.ABORT.getValue() == txnAction) { Consumer redeliverConsumer = null; if (getDispatcher() instanceof PersistentDispatcherSingleActiveConsumer) { redeliverConsumer = ((PersistentDispatcherSingleActiveConsumer) getDispatcher()).getActiveConsumer(); } - return pendingAckHandle.abortTxn(txnID, redeliverConsumer, lowWaterMark, false); + return pendingAckHandle.abortTxn(txnID, redeliverConsumer, lowWaterMark); } else { return FutureUtil.failedFuture(new NotAllowedException("Unsupported txnAction " + 
txnAction)); } @@ -1219,5 +1143,9 @@ public CompletableFuture getPendingAckManageLedger() { } } + public boolean checkIfPendingAckStoreInit() { + return this.pendingAckHandle.checkIfPendingAckStoreInit(); + } + private static final Logger log = LoggerFactory.getLogger(PersistentSubscription.class); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java index 64e4b7ff4ed49..2d154bc54f8bc 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java @@ -44,7 +44,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.LongAdder; import java.util.function.BiFunction; import java.util.stream.Collectors; import lombok.Getter; @@ -72,10 +71,12 @@ import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.bookkeeper.net.BookieId; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.resources.NamespaceResources.PartitionedTopicResources; +import org.apache.pulsar.broker.service.AbstractReplicator; import org.apache.pulsar.broker.service.AbstractTopic; import org.apache.pulsar.broker.service.BrokerService; import org.apache.pulsar.broker.service.BrokerServiceException; @@ -116,6 +117,7 @@ import org.apache.pulsar.client.impl.BatchMessageIdImpl; import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.client.impl.MessageImpl; +import org.apache.pulsar.client.impl.PulsarClientImpl; import 
org.apache.pulsar.common.api.proto.CommandSubscribe; import org.apache.pulsar.common.api.proto.CommandSubscribe.InitialPosition; import org.apache.pulsar.common.api.proto.CommandSubscribe.SubType; @@ -123,6 +125,7 @@ import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.api.proto.TxnAction; import org.apache.pulsar.common.events.EventsTopicNames; +import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.BacklogQuota; import org.apache.pulsar.common.policies.data.InactiveTopicDeleteMode; @@ -143,7 +146,6 @@ import org.apache.pulsar.common.policies.data.stats.TopicStatsImpl; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.schema.SchemaData; -import org.apache.pulsar.common.protocol.schema.SchemaVersion; import org.apache.pulsar.common.util.Codec; import org.apache.pulsar.common.util.DateFormatter; import org.apache.pulsar.common.util.FutureUtil; @@ -181,6 +183,7 @@ public class PersistentTopic extends AbstractTopic public boolean msgChunkPublished; private Optional dispatchRateLimiter = Optional.empty(); + private final Object dispatchRateLimiterLock = new Object(); private Optional subscribeRateLimiter = Optional.empty(); public volatile long delayedDeliveryTickTimeMillis = 1000; private final long backloggedCursorThresholdEntries; @@ -218,9 +221,6 @@ protected TopicStatsHelper initialValue() { @Getter protected final TransactionBuffer transactionBuffer; - private final LongAdder bytesOutFromRemovedSubscriptions = new LongAdder(); - private final LongAdder msgOutFromRemovedSubscriptions = new LongAdder(); - // Record the last time a data message (ie: not an internal Pulsar marker) is published on the topic private long lastDataMessagePublishedTimestamp = 0; @@ -249,11 +249,17 @@ public void reset() { } } - public PersistentTopic(String topic, ManagedLedger ledger, BrokerService brokerService) 
throws NamingException { + public PersistentTopic(String topic, ManagedLedger ledger, BrokerService brokerService) { super(topic, brokerService); this.ledger = ledger; - this.subscriptions = new ConcurrentOpenHashMap<>(16, 1); - this.replicators = new ConcurrentOpenHashMap<>(16, 1); + this.subscriptions = ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); + this.replicators = ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); this.delayedDeliveryEnabled = brokerService.pulsar().getConfiguration().isDelayedDeliveryEnabled(); this.delayedDeliveryTickTimeMillis = brokerService.pulsar().getConfiguration().getDelayedDeliveryTickTimeMillis(); @@ -265,20 +271,8 @@ public PersistentTopic(String topic, ManagedLedger ledger, BrokerService brokerS this.compactedTopic = new CompactedTopicImpl(brokerService.pulsar().getBookKeeperClient()); for (ManagedCursor cursor : ledger.getCursors()) { - if (cursor.getName().startsWith(replicatorPrefix)) { - String localCluster = brokerService.pulsar().getConfiguration().getClusterName(); - String remoteCluster = PersistentReplicator.getRemoteCluster(cursor.getName()); - boolean isReplicatorStarted = false; - try { - isReplicatorStarted = addReplicationCluster(remoteCluster, cursor, localCluster); - } catch (Exception e) { - log.warn("[{}] failed to start replication", topic, e); - } - if (!isReplicatorStarted) { - throw new NamingException( - PersistentTopic.this.getName() + " Failed to start replicator " + remoteCluster); - } - } else if (cursor.getName().equals(DEDUPLICATION_CURSOR_NAME)) { + if (cursor.getName().equals(DEDUPLICATION_CURSOR_NAME) + || cursor.getName().startsWith(replicatorPrefix)) { // This is not a regular subscription, we are going to // ignore it for now and let the message dedup logic to take care of it } else { @@ -309,7 +303,17 @@ public PersistentTopic(String topic, ManagedLedger ledger, BrokerService brokerS @Override public 
CompletableFuture initialize() { - return brokerService.pulsar().getPulsarResources().getNamespaceResources() + List> futures = new ArrayList<>(); + futures.add(initTopicPolicy()); + for (ManagedCursor cursor : ledger.getCursors()) { + if (cursor.getName().startsWith(replicatorPrefix)) { + String localCluster = brokerService.pulsar().getConfiguration().getClusterName(); + String remoteCluster = PersistentReplicator.getRemoteCluster(cursor.getName()); + futures.add(addReplicationCluster(remoteCluster, cursor, localCluster)); + } + } + return FutureUtil.waitForAll(futures).thenCompose(__ -> + brokerService.pulsar().getPulsarResources().getNamespaceResources() .getPoliciesAsync(TopicName.get(topic).getNamespaceObject()) .thenAccept(optPolicies -> { if (!optPolicies.isPresent()) { @@ -338,7 +342,7 @@ public CompletableFuture initialize() { updateUnackedMessagesAppliedOnSubscription(null); updateUnackedMessagesExceededOnConsumer(null); return null; - }); + })); } // for testing purposes @@ -348,8 +352,14 @@ public CompletableFuture initialize() { super(topic, brokerService); this.ledger = ledger; this.messageDeduplication = messageDeduplication; - this.subscriptions = new ConcurrentOpenHashMap<>(16, 1); - this.replicators = new ConcurrentOpenHashMap<>(16, 1); + this.subscriptions = ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); + this.replicators = ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); this.compactedTopic = new CompactedTopicImpl(brokerService.pulsar().getBookKeeperClient()); this.backloggedCursorThresholdEntries = brokerService.pulsar().getConfiguration().getManagedLedgerCursorBackloggedThreshold(); @@ -363,7 +373,7 @@ public CompletableFuture initialize() { } private void initializeRateLimiterIfNeeded(Optional policies) { - synchronized (dispatchRateLimiter) { + synchronized (dispatchRateLimiterLock) { // dispatch rate limiter for topic if (!dispatchRateLimiter.isPresent() 
&& DispatchRateLimiter .isDispatchRateNeeded(brokerService, policies, topic, Type.TOPIC)) { @@ -601,14 +611,15 @@ public void updatePropertiesFailed(ManagedLedgerException exception, Object ctx) } private boolean hasRemoteProducers() { - AtomicBoolean foundRemote = new AtomicBoolean(false); - producers.values().forEach(producer -> { + if (producers.isEmpty()) { + return false; + } + for (Producer producer : producers.values()) { if (producer.isRemote()) { - foundRemote.set(true); + return true; } - }); - - return foundRemote.get(); + } + return false; } public CompletableFuture startReplProducers() { @@ -745,7 +756,7 @@ public CompletableFuture subscribe(final TransportCnx cnx, String subs getDurableSubscription(subscriptionName, initialPosition, startMessageRollbackDurationSec, replicatedSubscriptionState) : getNonDurableSubscription(subscriptionName, startMessageId, initialPosition, - startMessageRollbackDurationSec); + startMessageRollbackDurationSec, readCompacted); int maxUnackedMessages = isDurable ? getMaxUnackedMessagesOnConsumer() @@ -793,6 +804,16 @@ public CompletableFuture subscribe(final TransportCnx cnx, String subs if (ex.getCause() instanceof ConsumerBusyException) { log.warn("[{}][{}] Consumer {} {} already connected", topic, subscriptionName, consumerId, consumerName); + Consumer consumer = null; + try { + consumer = subscriptionFuture.isDone() ? 
getActiveConsumer(subscriptionFuture.get()) : null; + // cleanup consumer if connection is already closed + if (consumer != null && !consumer.cnx().isActive()) { + consumer.close(); + } + } catch (Exception be) { + log.error("Failed to clean up consumer on closed connection {}, {}", consumer, be.getMessage()); + } } else if (ex.getCause() instanceof SubscriptionBusyException) { log.warn("[{}][{}] {}", topic, subscriptionName, ex.getMessage()); } else { @@ -891,7 +912,8 @@ public void openCursorFailed(ManagedLedgerException exception, Object ctx) { } private CompletableFuture getNonDurableSubscription(String subscriptionName, - MessageId startMessageId, InitialPosition initialPosition, long startMessageRollbackDurationSec) { + MessageId startMessageId, InitialPosition initialPosition, long startMessageRollbackDurationSec, + boolean isReadCompacted) { log.info("[{}][{}] Creating non-durable subscription at msg id {}", topic, subscriptionName, startMessageId); CompletableFuture subscriptionFuture = new CompletableFuture<>(); @@ -924,7 +946,8 @@ private CompletableFuture getNonDurableSubscription(Stri Position startPosition = new PositionImpl(ledgerId, entryId); ManagedCursor cursor = null; try { - cursor = ledger.newNonDurableCursor(startPosition, subscriptionName, initialPosition); + cursor = ledger.newNonDurableCursor(startPosition, subscriptionName, initialPosition, + isReadCompacted); } catch (ManagedLedgerException e) { return FutureUtil.failedFuture(e); } @@ -980,27 +1003,32 @@ public CompletableFuture createSubscription(String subscriptionNam @Override public CompletableFuture unsubscribe(String subscriptionName) { CompletableFuture unsubscribeFuture = new CompletableFuture<>(); - getBrokerService().getManagedLedgerFactory().asyncDelete(TopicName.get(MLPendingAckStore - .getTransactionPendingAckStoreSuffix(topic, - Codec.encode(subscriptionName))).getPersistenceNamingEncoding(), - new AsyncCallbacks.DeleteLedgerCallback() { - @Override - public void 
deleteLedgerComplete(Object ctx) { - asyncDeleteCursor(subscriptionName, unsubscribeFuture); - } - @Override - public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { - if (exception instanceof MetadataNotFoundException) { - asyncDeleteCursor(subscriptionName, unsubscribeFuture); - return; - } + if (brokerService.pulsar().getConfiguration().isTransactionCoordinatorEnabled()) { + getBrokerService().getManagedLedgerFactory().asyncDelete(TopicName.get(MLPendingAckStore + .getTransactionPendingAckStoreSuffix(topic, + Codec.encode(subscriptionName))).getPersistenceNamingEncoding(), + new AsyncCallbacks.DeleteLedgerCallback() { + @Override + public void deleteLedgerComplete(Object ctx) { + asyncDeleteCursor(subscriptionName, unsubscribeFuture); + } - unsubscribeFuture.completeExceptionally(exception); - log.error("[{}][{}] Error deleting subscription pending ack store", - topic, subscriptionName, exception); - } - }, null); + @Override + public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) { + if (exception instanceof MetadataNotFoundException) { + asyncDeleteCursor(subscriptionName, unsubscribeFuture); + return; + } + + unsubscribeFuture.completeExceptionally(exception); + log.error("[{}][{}] Error deleting subscription pending ack store", + topic, subscriptionName, exception); + } + }, null); + } else { + asyncDeleteCursor(subscriptionName, unsubscribeFuture); + } return unsubscribeFuture; } @@ -1023,6 +1051,11 @@ public void deleteCursorFailed(ManagedLedgerException exception, Object ctx) { log.debug("[{}][{}] Error deleting cursor for subscription", topic, subscriptionName, exception); } + if (exception instanceof ManagedLedgerException.ManagedLedgerNotFoundException) { + unsubscribeFuture.complete(null); + lastActive = System.nanoTime(); + return; + } unsubscribeFuture.completeExceptionally(new PersistenceException(exception)); } }, null); @@ -1030,10 +1063,12 @@ public void deleteCursorFailed(ManagedLedgerException 
exception, Object ctx) { void removeSubscription(String subscriptionName) { PersistentSubscription sub = subscriptions.remove(subscriptionName); - // preserve accumulative stats form removed subscription - SubscriptionStatsImpl stats = sub.getStats(false, false); - bytesOutFromRemovedSubscriptions.add(stats.bytesOutCounter); - msgOutFromRemovedSubscriptions.add(stats.msgOutCounter); + if (sub != null) { + // preserve accumulative stats form removed subscription + SubscriptionStatsImpl stats = sub.getStats(false, false); + bytesOutFromRemovedSubscriptions.add(stats.bytesOutCounter); + msgOutFromRemovedSubscriptions.add(stats.msgOutCounter); + } } /** @@ -1123,11 +1158,14 @@ private CompletableFuture delete(boolean failIfHasSubscriptions, // 2. We want to kick out everyone and forcefully delete the topic. // In this case, we shouldn't care if the usageCount is 0 or not, just proceed if (currentUsageCount() == 0 || (closeIfClientsConnected && !failIfHasSubscriptions)) { - CompletableFuture deleteSchemaFuture = - deleteSchema ? deleteSchema() : CompletableFuture.completedFuture(null); - - deleteSchemaFuture.thenAccept(__ -> deleteTopicPolicies()) - .thenCompose(__ -> transactionBuffer.clearSnapshot()).whenComplete((v, ex) -> { + CompletableFuture deleteTopicAuthenticationFuture = new CompletableFuture<>(); + brokerService.deleteTopicAuthenticationWithRetry(topic, deleteTopicAuthenticationFuture, 5); + + deleteTopicAuthenticationFuture.thenCompose( + __ -> deleteSchema ? 
deleteSchema() : CompletableFuture.completedFuture(null)) + .thenCompose(__ -> deleteTopicPolicies()) + .thenCompose(__ -> transactionBufferCleanupAndClose()) + .whenComplete((v, ex) -> { if (ex != null) { log.error("[{}] Error deleting topic", topic, ex); unfenceTopicToResume(); @@ -1145,14 +1183,14 @@ private CompletableFuture delete(boolean failIfHasSubscriptions, ledger.asyncDelete(new AsyncCallbacks.DeleteLedgerCallback() { @Override public void deleteLedgerComplete(Object ctx) { - brokerService.removeTopicFromCache(topic); + brokerService.removeTopicFromCache(PersistentTopic.this); dispatchRateLimiter.ifPresent(DispatchRateLimiter::close); subscribeRateLimiter.ifPresent(SubscribeRateLimiter::close); - brokerService.pulsar().getTopicPoliciesService() - .clean(TopicName.get(topic)); + unregisterTopicPolicyListener(); + log.info("[{}] Topic deleted", topic); deleteFuture.complete(null); } @@ -1228,11 +1266,7 @@ public CompletableFuture close(boolean closeWithoutWaitingClientDisconnect replicators.forEach((cluster, replicator) -> futures.add(replicator.disconnect())); producers.values().forEach(producer -> futures.add(producer.disconnect())); if (topicPublishRateLimiter != null) { - try { - topicPublishRateLimiter.close(); - } catch (Exception e) { - log.warn("Error closing topicPublishRateLimiter for topic {}", topic, e); - } + topicPublishRateLimiter.close(); } subscriptions.forEach((s, sub) -> futures.add(sub.disconnect())); if (this.resourceGroupPublishLimiter != null) { @@ -1249,29 +1283,13 @@ public CompletableFuture close(boolean closeWithoutWaitingClientDisconnect @Override public void closeComplete(Object ctx) { // Everything is now closed, remove the topic from map - brokerService.removeTopicFromCache(topic) - .thenRun(() -> { - replicatedSubscriptionsController.ifPresent(ReplicatedSubscriptionsController::close); - - dispatchRateLimiter.ifPresent(DispatchRateLimiter::close); - - subscribeRateLimiter.ifPresent(SubscribeRateLimiter::close); - - 
brokerService.pulsar().getTopicPoliciesService().clean(TopicName.get(topic)); - log.info("[{}] Topic closed", topic); - closeFuture.complete(null); - }) - .exceptionally(ex -> { - closeFuture.completeExceptionally(ex); - return null; - }); + disposeTopic(closeFuture); } @Override public void closeFailed(ManagedLedgerException exception, Object ctx) { log.error("[{}] Failed to close managed ledger, proceeding anyway.", topic, exception); - brokerService.removeTopicFromCache(topic); - closeFuture.complete(null); + disposeTopic(closeFuture); } }, null); }).exceptionally(exception -> { @@ -1284,6 +1302,26 @@ public void closeFailed(ManagedLedgerException exception, Object ctx) { return closeFuture; } + private void disposeTopic(CompletableFuture closeFuture) { + brokerService.removeTopicFromCache(topic) + .thenRun(() -> { + replicatedSubscriptionsController.ifPresent(ReplicatedSubscriptionsController::close); + + dispatchRateLimiter.ifPresent(DispatchRateLimiter::close); + + subscribeRateLimiter.ifPresent(SubscribeRateLimiter::close); + + unregisterTopicPolicyListener(); + log.info("[{}] Topic closed", topic); + cancelFencedTopicMonitoringTask(); + closeFuture.complete(null); + }) + .exceptionally(ex -> { + closeFuture.completeExceptionally(ex); + return null; + }); + } + @VisibleForTesting CompletableFuture checkReplicationAndRetryOnFailure() { CompletableFuture result = new CompletableFuture(); @@ -1329,6 +1367,10 @@ public CompletableFuture checkReplication() { if (!name.isGlobal()) { return CompletableFuture.completedFuture(null); } + NamespaceName heartbeatNamespace = brokerService.pulsar().getHeartbeatNamespaceV2(); + if (name.getNamespaceObject().equals(heartbeatNamespace)) { + return CompletableFuture.completedFuture(null); + } if (log.isDebugEnabled()) { log.debug("[{}] Checking replication status", name); @@ -1519,13 +1561,13 @@ CompletableFuture startReplicator(String remoteCluster) { @Override public void openCursorComplete(ManagedCursor cursor, Object ctx) 
{ String localCluster = brokerService.pulsar().getConfiguration().getClusterName(); - boolean isReplicatorStarted = addReplicationCluster(remoteCluster, cursor, localCluster); - if (isReplicatorStarted) { - future.complete(null); - } else { - future.completeExceptionally(new NamingException( - PersistentTopic.this.getName() + " Failed to start replicator " + remoteCluster)); - } + addReplicationCluster(remoteCluster, cursor, localCluster).whenComplete((__, ex) -> { + if (ex == null) { + future.complete(null); + } else { + future.completeExceptionally(ex); + } + }); } @Override @@ -1538,23 +1580,29 @@ public void openCursorFailed(ManagedLedgerException exception, Object ctx) { return future; } - protected boolean addReplicationCluster(String remoteCluster, ManagedCursor cursor, String localCluster) { - AtomicBoolean isReplicatorStarted = new AtomicBoolean(true); - replicators.computeIfAbsent(remoteCluster, r -> { - try { - return new PersistentReplicator(PersistentTopic.this, cursor, localCluster, remoteCluster, - brokerService); - } catch (NamingException | PulsarServerException e) { - isReplicatorStarted.set(false); - log.error("[{}] Replicator startup failed due to partitioned-topic {}", topic, remoteCluster); - } - return null; - }); - // clean up replicator if startup is failed - if (!isReplicatorStarted.get()) { - replicators.remove(remoteCluster); - } - return isReplicatorStarted.get(); + protected CompletableFuture addReplicationCluster(String remoteCluster, ManagedCursor cursor, + String localCluster) { + return AbstractReplicator.validatePartitionedTopicAsync(PersistentTopic.this.getName(), brokerService) + .thenCompose(__ -> brokerService.pulsar().getPulsarResources().getClusterResources() + .getClusterAsync(remoteCluster) + .thenApply(clusterData -> + brokerService.getReplicationClient(remoteCluster, clusterData))) + .thenAccept(replicationClient -> { + Replicator replicator = replicators.computeIfAbsent(remoteCluster, r -> { + try { + return new 
PersistentReplicator(PersistentTopic.this, cursor, localCluster, + remoteCluster, brokerService, (PulsarClientImpl) replicationClient); + } catch (PulsarServerException e) { + log.error("[{}] Replicator startup failed {}", topic, remoteCluster, e); + } + return null; + }); + + // clean up replicator if startup is failed + if (replicator == null) { + replicators.removeNullValue(remoteCluster); + } + }); } CompletableFuture removeReplicator(String remoteCluster) { @@ -1633,7 +1681,7 @@ public ManagedLedger getManagedLedger() { public void updateRates(NamespaceStats nsStats, NamespaceBundleStats bundleStats, StatsOutputStream topicStatsStream, ClusterReplicationMetrics replStats, String namespace, boolean hydratePublishers) { - + this.publishRateLimitedTimes = 0; TopicStatsHelper topicStatsHelper = threadLocalTopicStats.get(); topicStatsHelper.reset(); @@ -1746,6 +1794,7 @@ public void updateRates(NamespaceStats nsStats, NamespaceBundleStats bundleStats double subMsgRateOut = 0; double subMsgThroughputOut = 0; double subMsgRateRedeliver = 0; + double subMsgAckRate = 0; // Start subscription name & consumers try { @@ -1759,6 +1808,7 @@ public void updateRates(NamespaceStats nsStats, NamespaceBundleStats bundleStats ConsumerStatsImpl consumerStats = consumer.getStats(); subMsgRateOut += consumerStats.msgRateOut; + subMsgAckRate += consumerStats.messageAckRate; subMsgThroughputOut += consumerStats.msgThroughputOut; subMsgRateRedeliver += consumerStats.msgRateRedeliver; @@ -1773,6 +1823,7 @@ public void updateRates(NamespaceStats nsStats, NamespaceBundleStats bundleStats subscription.getNumberOfEntriesInBacklog(true)); topicStatsStream.writePair("msgRateExpired", subscription.getExpiredMessageRate()); topicStatsStream.writePair("msgRateOut", subMsgRateOut); + topicStatsStream.writePair("messageAckRate", subMsgAckRate); topicStatsStream.writePair("msgThroughputOut", subMsgThroughputOut); topicStatsStream.writePair("msgRateRedeliver", subMsgRateRedeliver); 
topicStatsStream.writePair("numberOfEntriesSinceFirstNotAckedMessage", @@ -1882,6 +1933,7 @@ public TopicStatsImpl getStats(boolean getPreciseBacklog, boolean subscriptionBa stats.waitingPublishers = getWaitingProducersCount(); stats.bytesOutCounter = bytesOutFromRemovedSubscriptions.longValue(); stats.msgOutCounter = msgOutFromRemovedSubscriptions.longValue(); + stats.publishRateLimitedTimes = publishRateLimitedTimes; subscriptions.forEach((name, subscription) -> { SubscriptionStatsImpl subStats = subscription.getStats(getPreciseBacklog, subscriptionBacklogSize); @@ -1937,12 +1989,7 @@ public TopicStatsImpl getStats(boolean getPreciseBacklog, boolean subscriptionBa } private Optional getCompactorMXBean() { - Compactor compactor = null; - try { - compactor = brokerService.pulsar().getCompactor(false); - } catch (PulsarServerException ex) { - log.warn("get compactor error", ex); - } + Compactor compactor = brokerService.pulsar().getNullableCompactor(); return Optional.ofNullable(compactor).map(c -> c.getStats()); } @@ -1970,39 +2017,43 @@ public CompletableFuture getInternalStats(boolean stats.state = ml.getState().toString(); stats.ledgers = Lists.newArrayList(); - List> futures = includeLedgerMetadata ? 
Lists.newArrayList() : null; + Set> futures = Sets.newConcurrentHashSet(); CompletableFuture> availableBookiesFuture = brokerService.pulsar().getPulsarResources().getBookieResources().listAvailableBookiesAsync(); - availableBookiesFuture.whenComplete((bookies, e) -> { - if (e != null) { - log.error("[{}] Failed to fetch available bookies.", topic, e); - statFuture.completeExceptionally(e); - } else { - ml.getLedgersInfo().forEach((id, li) -> { - LedgerInfo info = new LedgerInfo(); - info.ledgerId = li.getLedgerId(); - info.entries = li.getEntries(); - info.size = li.getSize(); - info.offloaded = li.hasOffloadContext() && li.getOffloadContext().getComplete(); - stats.ledgers.add(info); - if (futures != null) { - futures.add(ml.getLedgerMetadata(li.getLedgerId()).handle((lMetadata, ex) -> { - if (ex == null) { - info.metadata = lMetadata; - } - return null; - })); - futures.add(ml.getEnsemblesAsync(li.getLedgerId()).handle((ensembles, ex) -> { - if (ex == null) { - info.underReplicated = !bookies.containsAll(ensembles.stream().map(BookieId::toString) - .collect(Collectors.toList())); + futures.add( + availableBookiesFuture + .whenComplete((bookies, e) -> { + if (e != null) { + log.error("[{}] Failed to fetch available bookies.", topic, e); + statFuture.completeExceptionally(e); + } else { + ml.getLedgersInfo().forEach((id, li) -> { + LedgerInfo info = new LedgerInfo(); + info.ledgerId = li.getLedgerId(); + info.entries = li.getEntries(); + info.size = li.getSize(); + info.offloaded = li.hasOffloadContext() && li.getOffloadContext().getComplete(); + stats.ledgers.add(info); + if (includeLedgerMetadata) { + futures.add(ml.getLedgerMetadata(li.getLedgerId()).handle((lMetadata, ex) -> { + if (ex == null) { + info.metadata = lMetadata; + } + return null; + })); + futures.add(ml.getEnsemblesAsync(li.getLedgerId()).handle((ensembles, ex) -> { + if (ex == null) { + info.underReplicated = + !bookies.containsAll(ensembles.stream().map(BookieId::toString) + 
.collect(Collectors.toList())); + } + return null; + })); } - return null; - })); + }); } - }); - } - }); + }) + ); // Add ledger info for compacted topic ledger if exist. LedgerInfo info = new LedgerInfo(); @@ -2118,16 +2169,11 @@ public CompletableFuture getInternalStats(boolean } else { schemaStoreLedgersFuture.complete(null); } - schemaStoreLedgersFuture.thenRun(() -> { - if (futures != null) { - FutureUtil.waitForAll(futures).handle((res, ex) -> { - statFuture.complete(stats); - return null; - }); - } else { + schemaStoreLedgersFuture.thenRun(() -> + FutureUtil.waitForAll(futures).handle((res, ex) -> { statFuture.complete(stats); - } - }).exceptionally(e -> { + return null; + })).exceptionally(e -> { statFuture.completeExceptionally(e); return null; }); @@ -2246,42 +2292,48 @@ private CompletableFuture tryToDeletePartitionedMetadata() { return CompletableFuture.completedFuture(null); } TopicName topicName = TopicName.get(TopicName.get(topic).getPartitionedTopicName()); - try { - PartitionedTopicResources partitionedTopicResources = getBrokerService().pulsar().getPulsarResources() - .getNamespaceResources() - .getPartitionedTopicResources(); - if (topicName.isPartitioned() && !partitionedTopicResources.partitionedTopicExists(topicName)) { - return CompletableFuture.completedFuture(null); - } - CompletableFuture deleteMetadataFuture = new CompletableFuture<>(); - getBrokerService().fetchPartitionedTopicMetadataAsync(TopicName.get(topicName.getPartitionedTopicName())) - .thenAccept((metadata -> { - // make sure all sub partitions were deleted - for (int i = 0; i < metadata.partitions; i++) { - if (brokerService.getPulsar().getPulsarResources().getTopicResources() - .persistentTopicExists(topicName.getPartition(i)).join()) { - throw new UnsupportedOperationException(); - } - } - })) - .thenAccept((res) -> partitionedTopicResources.deletePartitionedTopicAsync(topicName) - .thenAccept((r) -> { - deleteMetadataFuture.complete(null); - }).exceptionally(ex -> { - 
deleteMetadataFuture.completeExceptionally(ex.getCause()); - return null; - })) - .exceptionally((e) -> { - if (!(e.getCause() instanceof UnsupportedOperationException)) { - log.error("delete metadata fail", e); - } - deleteMetadataFuture.complete(null); - return null; - }); - return deleteMetadataFuture; - } catch (Exception e) { - return FutureUtil.failedFuture(e); - } + PartitionedTopicResources partitionedTopicResources = getBrokerService().pulsar().getPulsarResources() + .getNamespaceResources() + .getPartitionedTopicResources(); + return partitionedTopicResources.partitionedTopicExistsAsync(topicName) + .thenCompose(partitionedTopicExist -> { + if (!partitionedTopicExist) { + return CompletableFuture.completedFuture(null); + } else { + return getBrokerService() + .fetchPartitionedTopicMetadataAsync(topicName) + .thenCompose((metadata -> { + List> persistentTopicExists = + new ArrayList<>(metadata.partitions); + for (int i = 0; i < metadata.partitions; i++) { + persistentTopicExists.add(brokerService.getPulsar() + .getPulsarResources().getTopicResources() + .persistentTopicExists(topicName.getPartition(i))); + } + List> unmodifiablePersistentTopicExists = + Collections.unmodifiableList(persistentTopicExists); + return FutureUtil.waitForAll(unmodifiablePersistentTopicExists) + .thenCompose(unused -> { + // make sure all sub partitions were deleted after all future complete + Optional anyExistPartition = unmodifiablePersistentTopicExists + .stream() + .map(CompletableFuture::join) + .filter(topicExist -> topicExist) + .findAny(); + if (anyExistPartition.isPresent()) { + log.error("[{}] Delete topic metadata failed because" + + " another partition exist.", topicName); + throw new UnsupportedOperationException( + String.format("Another partition exists for [%s].", + topicName)); + } else { + return partitionedTopicResources + .deletePartitionedTopicAsync(topicName); + } + }); + })); + } + }); } @Override @@ -2410,40 +2462,52 @@ public CompletableFuture 
onPoliciesUpdate(Policies data) { this.updateMaxPublishRate(data); - producers.values().forEach(producer -> { - producer.checkPermissions(); - producer.checkEncryption(); - }); - subscriptions.forEach((subName, sub) -> { - sub.getConsumers().forEach(Consumer::checkPermissions); - Dispatcher dispatcher = sub.getDispatcher(); - // If the topic-level policy already exists, the namespace-level policy cannot override - // the topic-level policy. - if (dispatcher != null - && (!topicPolicies.isPresent() || !topicPolicies.get().isSubscriptionDispatchRateSet())) { - dispatcher.getRateLimiter().ifPresent(rateLimiter -> rateLimiter.onPoliciesUpdate(data)); - } - }); - replicators.forEach((name, replicator) -> - replicator.getRateLimiter().ifPresent(DispatchRateLimiter::updateDispatchRate) - ); - checkMessageExpiry(); - CompletableFuture replicationFuture = checkReplicationAndRetryOnFailure(); - CompletableFuture dedupFuture = checkDeduplicationStatus(); - CompletableFuture persistentPoliciesFuture = checkPersistencePolicies(); - // update rate-limiter if policies updated - if (this.dispatchRateLimiter.isPresent()) { - if (!topicPolicies.isPresent() || !topicPolicies.get().isDispatchRateSet()) { - dispatchRateLimiter.get().onPoliciesUpdate(data); - } - } - if (this.subscribeRateLimiter.isPresent()) { - subscribeRateLimiter.get().onPoliciesUpdate(data); - } + List> producerCheckFutures = new ArrayList<>(producers.size()); + producers.values().forEach(producer -> producerCheckFutures.add( + producer.checkPermissionsAsync().thenRun(producer::checkEncryption))); + + return FutureUtil.waitForAll(producerCheckFutures).thenCompose((__) -> { + List> subscriptionCheckFutures = new ArrayList<>((int) subscriptions.size()); + subscriptions.forEach((subName, sub) -> { + List> consumerCheckFutures = new ArrayList<>(sub.getConsumers().size()); + sub.getConsumers().forEach(consumer -> consumerCheckFutures.add(consumer.checkPermissionsAsync())); + 
subscriptionCheckFutures.add(FutureUtil.waitForAll(consumerCheckFutures).thenRun(() -> { + Dispatcher dispatcher = sub.getDispatcher(); + // If the topic-level policy already exists, the namespace-level policy cannot override + // the topic-level policy. + if (dispatcher != null && (!topicPolicies.isPresent() || !topicPolicies.get() + .isSubscriptionDispatchRateSet())) { + dispatcher.getRateLimiter() + .ifPresent(rateLimiter -> rateLimiter.onPoliciesUpdate(data)); + } + })); + }); + return FutureUtil.waitForAll(subscriptionCheckFutures).thenCompose((___) -> { + replicators.forEach((name, replicator) -> + replicator.getRateLimiter().ifPresent(DispatchRateLimiter::updateDispatchRate) + ); + checkMessageExpiry(); + CompletableFuture replicationFuture = checkReplicationAndRetryOnFailure(); + CompletableFuture dedupFuture = checkDeduplicationStatus(); + CompletableFuture persistentPoliciesFuture = checkPersistencePolicies(); + // update rate-limiter if policies updated + if (this.dispatchRateLimiter.isPresent()) { + if (!topicPolicies.isPresent() || !topicPolicies.get().isDispatchRateSet()) { + dispatchRateLimiter.get().onPoliciesUpdate(data); + } + } + if (this.subscribeRateLimiter.isPresent()) { + subscribeRateLimiter.get().onPoliciesUpdate(data); + } - return CompletableFuture.allOf(replicationFuture, dedupFuture, persistentPoliciesFuture, - preCreateSubscriptionForCompactionIfNeeded()); + return CompletableFuture.allOf(replicationFuture, dedupFuture, persistentPoliciesFuture, + preCreateSubscriptionForCompactionIfNeeded()); + }); + }).exceptionally(ex -> { + log.error("[{}] update namespace polices : {} error", this.getName(), data, ex); + throw FutureUtil.wrapToCompletionException(ex); + }); } /** @@ -2558,22 +2622,9 @@ public void readEntryFailed(ManagedLedgerException exception, Object ctx) { return false; } } else { - Long ledgerId = ((ManagedCursorContainer) ledger.getCursors()).getSlowestReaderPosition().getLedgerId(); + PositionImpl slowestPosition = 
((ManagedCursorContainer) ledger.getCursors()).getSlowestReaderPosition(); try { - org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo - ledgerInfo = ledger.getLedgerInfo(ledgerId).get(); - if (ledgerInfo != null && ledgerInfo.hasTimestamp() && ledgerInfo.getTimestamp() > 0 - && ((ManagedLedgerImpl) ledger).getClock().millis() - ledgerInfo.getTimestamp() - > backlogQuotaLimitInSecond * 1000) { - if (log.isDebugEnabled()) { - log.debug("Time based backlog quota exceeded, quota {}, age of ledger " - + "slowest cursor currently on {}", backlogQuotaLimitInSecond * 1000, - ((ManagedLedgerImpl) ledger).getClock().millis() - ledgerInfo.getTimestamp()); - } - return true; - } else { - return false; - } + return slowestReaderTimeBasedBacklogQuotaCheck(slowestPosition); } catch (Exception e) { log.error("[{}][{}] Error reading entry for precise time based backlog check", topicName, e); return false; @@ -2581,6 +2632,36 @@ public void readEntryFailed(ManagedLedgerException exception, Object ctx) { } } + private boolean slowestReaderTimeBasedBacklogQuotaCheck(PositionImpl slowestPosition) + throws ExecutionException, InterruptedException { + int backlogQuotaLimitInSecond = getBacklogQuota(BacklogQuota.BacklogQuotaType.message_age).getLimitTime(); + Long ledgerId = slowestPosition.getLedgerId(); + if (((ManagedLedgerImpl) ledger).getLedgersInfo().lastKey().equals(ledgerId)) { + return false; + } + int result; + org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo + ledgerInfo = ledger.getLedgerInfo(ledgerId).get(); + if (ledgerInfo != null && ledgerInfo.hasTimestamp() && ledgerInfo.getTimestamp() > 0 + && ((ManagedLedgerImpl) ledger).getClock().millis() - ledgerInfo.getTimestamp() + > backlogQuotaLimitInSecond * 1000 && (result = slowestPosition.compareTo( + new PositionImpl(ledgerInfo.getLedgerId(), ledgerInfo.getEntries() - 1))) <= 0) { + if (result < 0) { + if (log.isDebugEnabled()) { + log.debug("Time based backlog quota 
exceeded, quota {}, age of ledger " + + "slowest cursor currently on {}", backlogQuotaLimitInSecond * 1000, + ((ManagedLedgerImpl) ledger).getClock().millis() - ledgerInfo.getTimestamp()); + } + return true; + } else { + return slowestReaderTimeBasedBacklogQuotaCheck( + ((ManagedLedgerImpl) ledger).getNextValidPosition(slowestPosition)); + } + } else { + return false; + } + } + @Override public boolean isReplicated() { return !replicators.isEmpty(); @@ -2817,7 +2898,6 @@ private CompletableFuture getMessageTTL() { if (messageTtl.isPresent()) { return CompletableFuture.completedFuture(messageTtl.get()); } - TopicName name = TopicName.get(topic); return brokerService.pulsar().getPulsarResources().getNamespaceResources() .getPoliciesAsync(TopicName.get(topic).getNamespaceObject()) @@ -2908,6 +2988,18 @@ public boolean isSystemTopic() { return false; } + @Override + public boolean isPersistent() { + return true; + } + + private synchronized void cancelFencedTopicMonitoringTask() { + ScheduledFuture monitoringTask = this.fencedTopicMonitoringTask; + if (monitoringTask != null && !monitoringTask.isDone()) { + monitoringTask.cancel(false); + } + } + private synchronized void fence() { isFenced = true; ScheduledFuture monitoringTask = this.fencedTopicMonitoringTask; @@ -2922,10 +3014,7 @@ private synchronized void fence() { private synchronized void unfence() { isFenced = false; - ScheduledFuture monitoringTask = this.fencedTopicMonitoringTask; - if (monitoringTask != null && !monitoringTask.isDone()) { - monitoringTask.cancel(false); - } + cancelFencedTopicMonitoringTask(); } private void closeFencedTopicForcefully() { @@ -2965,8 +3054,7 @@ public void publishTxnMessage(TxnID txnID, ByteBuf headersAndPayload, PublishCon return; } if (isExceedMaximumMessageSize(headersAndPayload.readableBytes())) { - publishContext.completed(new NotAllowedException("Exceed maximum message size") - , -1, -1); + publishContext.completed(new NotAllowedException("Exceed maximum message size"), 
-1, -1); decrementPendingWriteOpsAndCheck(); return; } @@ -2986,6 +3074,14 @@ public void publishTxnMessage(TxnID txnID, ByteBuf headersAndPayload, PublishCon decrementPendingWriteOpsAndCheck(); }) .exceptionally(throwable -> { + throwable = throwable.getCause(); + if (throwable instanceof NotAllowedException) { + publishContext.completed((NotAllowedException) throwable, -1, -1); + decrementPendingWriteOpsAndCheck(); + return null; + } else if (!(throwable instanceof ManagedLedgerException)) { + throwable = new ManagedLedgerException(throwable); + } addFailed((ManagedLedgerException) throwable, publishContext); return null; }); @@ -3060,46 +3156,57 @@ public void onUpdate(TopicPolicies policies) { } }); - subscriptions.forEach((subName, sub) -> { - sub.getConsumers().forEach(Consumer::checkPermissions); - Dispatcher dispatcher = sub.getDispatcher(); - dispatcher.updateRateLimiter(policies.getSubscriptionDispatchRate()); - }); + List> consumerCheckFutures = new ArrayList<>(); + subscriptions.forEach((subName, sub) -> sub.getConsumers().forEach(consumer -> { + consumerCheckFutures.add(consumer.checkPermissionsAsync().thenRun(() -> { + Dispatcher dispatcher = sub.getDispatcher(); + if (dispatcher != null) { + dispatcher.updateRateLimiter(policies.getSubscriptionDispatchRate()); + } + })); + })); - if (policies.getPublishRate() != null) { - updatePublishDispatcher(policies.getPublishRate()); - } else { - updateMaxPublishRate(namespacePolicies.orElse(null)); - } + FutureUtil.waitForAll(consumerCheckFutures).thenRun(() -> { + if (policies.getPublishRate() != null) { + updatePublishDispatcher(policies.getPublishRate()); + } else { + updateMaxPublishRate(namespacePolicies.orElse(null)); + } - if (policies.isInactiveTopicPoliciesSet()) { - inactiveTopicPolicies = policies.getInactiveTopicPolicies(); - } else if (namespacePolicies.isPresent() && namespacePolicies.get().inactive_topic_policies != null) { - //topic-level policies is null , so use namespace-level - 
inactiveTopicPolicies = namespacePolicies.get().inactive_topic_policies; - } else { - //namespace-level policies is null , so use broker level - ServiceConfiguration cfg = brokerService.getPulsar().getConfiguration(); - resetInactiveTopicPolicies(cfg.getBrokerDeleteInactiveTopicsMode() - , cfg.getBrokerDeleteInactiveTopicsMaxInactiveDurationSeconds(), - cfg.isBrokerDeleteInactiveTopicsEnabled()); - } - updateUnackedMessagesAppliedOnSubscription(namespacePolicies.orElse(null)); - initializeTopicSubscribeRateLimiterIfNeeded(Optional.ofNullable(policies)); - if (this.subscribeRateLimiter.isPresent()) { - subscribeRateLimiter.ifPresent(subscribeRateLimiter -> - subscribeRateLimiter.onSubscribeRateUpdate(policies.getSubscribeRate())); - } - replicators.forEach((name, replicator) -> replicator.getRateLimiter() - .ifPresent(DispatchRateLimiter::updateDispatchRate)); - updateUnackedMessagesExceededOnConsumer(namespacePolicies.orElse(null)); + if (policies.isInactiveTopicPoliciesSet()) { + inactiveTopicPolicies = policies.getInactiveTopicPolicies(); + } else if (namespacePolicies.isPresent() && namespacePolicies.get().inactive_topic_policies != null) { + //topic-level policies is null , so use namespace-level + inactiveTopicPolicies = namespacePolicies.get().inactive_topic_policies; + } else { + //namespace-level policies is null , so use broker level + ServiceConfiguration cfg = brokerService.getPulsar().getConfiguration(); + resetInactiveTopicPolicies(cfg.getBrokerDeleteInactiveTopicsMode() + , cfg.getBrokerDeleteInactiveTopicsMaxInactiveDurationSeconds(), + cfg.isBrokerDeleteInactiveTopicsEnabled()); + } - checkDeduplicationStatus(); + updateUnackedMessagesAppliedOnSubscription(namespacePolicies.orElse(null)); + initializeTopicSubscribeRateLimiterIfNeeded(Optional.ofNullable(policies)); + if (this.subscribeRateLimiter.isPresent()) { + subscribeRateLimiter.ifPresent(subscribeRateLimiter -> + subscribeRateLimiter.onSubscribeRateUpdate(policies.getSubscribeRate())); + } + 
replicators.forEach((name, replicator) -> replicator.getRateLimiter() + .ifPresent(DispatchRateLimiter::updateDispatchRate)); + updateUnackedMessagesExceededOnConsumer(namespacePolicies.orElse(null)); + + checkDeduplicationStatus(); - preCreateSubscriptionForCompactionIfNeeded(); + preCreateSubscriptionForCompactionIfNeeded(); - // update managed ledger config - checkPersistencePolicies(); + // update managed ledger config + checkPersistencePolicies(); + }).exceptionally(e -> { + Throwable t = e instanceof CompletionException ? e.getCause() : e; + log.error("[{}] update topic policy error: {}", topic, t.getMessage(), t); + return null; + }); } private Optional getNamespacePolicies() { @@ -3107,7 +3214,7 @@ private Optional getNamespacePolicies() { } private void initializeTopicDispatchRateLimiterIfNeeded(TopicPolicies policies) { - synchronized (dispatchRateLimiter) { + synchronized (dispatchRateLimiterLock) { if (!dispatchRateLimiter.isPresent() && policies.getDispatchRate() != null) { this.dispatchRateLimiter = Optional.of(new DispatchRateLimiter(this, Type.TOPIC)); } @@ -3130,20 +3237,30 @@ private void initializeTopicSubscribeRateLimiterIfNeeded(Optional } } - private PersistentTopic getPersistentTopic() { - return this; + protected CompletableFuture initTopicPolicy() { + if (brokerService.pulsar().getConfig().isSystemTopicEnabled() + && brokerService.pulsar().getConfig().isTopicLevelPoliciesEnabled()) { + return CompletableFuture.completedFuture(null).thenRunAsync(() -> onUpdate( + brokerService.getPulsar().getTopicPoliciesService() + .getTopicPoliciesIfExists(TopicName.getPartitionedTopicName(topic))), + brokerService.getTopicOrderedExecutor()); + } + return CompletableFuture.completedFuture(null); } private void registerTopicPolicyListener() { if (brokerService.pulsar().getConfig().isSystemTopicEnabled() && brokerService.pulsar().getConfig().isTopicLevelPoliciesEnabled()) { - TopicName topicName = TopicName.get(topic); - TopicName cloneTopicName = topicName; 
- if (topicName.isPartitioned()) { - cloneTopicName = TopicName.get(topicName.getPartitionedTopicName()); - } + brokerService.getPulsar().getTopicPoliciesService() + .registerListener(TopicName.getPartitionedTopicName(topic), this); + } + } - brokerService.getPulsar().getTopicPoliciesService().registerListener(cloneTopicName, this); + private void unregisterTopicPolicyListener() { + if (brokerService.pulsar().getConfig().isSystemTopicEnabled() + && brokerService.pulsar().getConfig().isTopicLevelPoliciesEnabled()) { + brokerService.getPulsar().getTopicPoliciesService() + .unregisterListener(TopicName.getPartitionedTopicName(topic), this); } } @@ -3185,7 +3302,7 @@ public boolean checkSubscriptionTypesEnable(SubType subType) throws Exception { if (topicPolicies == null) { return checkNsAndBrokerSubscriptionTypesEnable(topicName, subType); } else { - if (topicPolicies.getSubscriptionTypesEnabled().isEmpty()) { + if (CollectionUtils.isEmpty(topicPolicies.getSubscriptionTypesEnabled())) { return checkNsAndBrokerSubscriptionTypesEnable(topicName, subType); } return topicPolicies.getSubscriptionTypesEnabled().contains(subType); @@ -3252,6 +3369,10 @@ public CompletableFuture getPendingAckManagedLedger(String subNam return subscription.getPendingAckManageLedger(); } + private CompletableFuture transactionBufferCleanupAndClose() { + return transactionBuffer.clearSnapshot().thenCompose(__ -> transactionBuffer.closeAsync()); + } + public long getLastDataMessagePublishedTimestamp() { return lastDataMessagePublishedTimestamp; } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java index e0501ea187e39..2b1ae4ba19331 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java +++ 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/ReplicatedSubscriptionsController.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.service.persistent; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import io.netty.buffer.ByteBuf; import io.prometheus.client.Gauge; import java.io.IOException; @@ -78,7 +79,7 @@ public ReplicatedSubscriptionsController(PersistentTopic topic, String localClus this.topic = topic; this.localCluster = localCluster; timer = topic.getBrokerService().pulsar().getExecutor() - .scheduleAtFixedRate(this::startNewSnapshot, 0, + .scheduleAtFixedRate(catchingAndLoggingThrowables(this::startNewSnapshot), 0, topic.getBrokerService().pulsar().getConfiguration() .getReplicatedSubscriptionsSnapshotFrequencyMillis(), TimeUnit.MILLISECONDS); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SubscribeRateLimiter.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SubscribeRateLimiter.java index e1d39aee2f3cb..93707b4977b68 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SubscribeRateLimiter.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SubscribeRateLimiter.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker.service.persistent; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.base.MoreObjects; import java.util.Objects; import java.util.Optional; @@ -271,7 +272,7 @@ public void close() { } private ScheduledFuture createTask() { - return executorService.scheduleAtFixedRate(this::closeAndClearRateLimiters, + return executorService.scheduleAtFixedRate(catchingAndLoggingThrowables(this::closeAndClearRateLimiters), this.subscribeRate.ratePeriodInSecond, this.subscribeRate.ratePeriodInSecond, TimeUnit.SECONDS); diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SystemTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SystemTopic.java index aaf83a9fd689e..f1d7b0665f357 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SystemTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/SystemTopic.java @@ -23,15 +23,18 @@ import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.service.BrokerService; -import org.apache.pulsar.broker.service.BrokerServiceException; public class SystemTopic extends PersistentTopic { - public SystemTopic(String topic, ManagedLedger ledger, BrokerService brokerService) - throws BrokerServiceException.NamingException, PulsarServerException { + public SystemTopic(String topic, ManagedLedger ledger, BrokerService brokerService) throws PulsarServerException { super(topic, ledger, brokerService); } + @Override + public boolean isDeleteWhileInactive() { + return false; + } + @Override public boolean isSizeBacklogExceeded() { return false; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java index 9cac21da67ddf..53157c7b1b2d7 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/BookkeeperSchemaStorage.java @@ -452,11 +452,14 @@ private CompletableFuture addNewSchemaEntryToS byte[] data ) { SchemaStorageFormat.SchemaEntry schemaEntry = newSchemaEntry(index, data); - return createLedger(schemaId).thenCompose(ledgerHandle -> - addEntry(ledgerHandle, schemaEntry).thenApply(entryId -> - Functions.newPositionInfo(ledgerHandle.getId(), entryId) - ) - ); + return 
createLedger(schemaId).thenCompose(ledgerHandle -> { + final long ledgerId = ledgerHandle.getId(); + return addEntry(ledgerHandle, schemaEntry) + .thenApply(entryId -> { + ledgerHandle.closeAsync(); + return Functions.newPositionInfo(ledgerId, entryId); + }); + }); } @NotNull diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/SchemaRegistryServiceImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/SchemaRegistryServiceImpl.java index 0eff36b54ce94..ff94aa32f2b8c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/SchemaRegistryServiceImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/schema/SchemaRegistryServiceImpl.java @@ -270,6 +270,9 @@ public CompletableFuture findSchemaVersion(String schemaId, SchemaData sch @Override public CompletableFuture checkConsumerCompatibility(String schemaId, SchemaData schemaData, SchemaCompatibilityStrategy strategy) { + if (SchemaCompatibilityStrategy.ALWAYS_COMPATIBLE == strategy) { + return CompletableFuture.completedFuture(null); + } return getSchema(schemaId).thenCompose(existingSchema -> { if (existingSchema != null && !existingSchema.schema.isDeleted()) { if (strategy == SchemaCompatibilityStrategy.BACKWARD @@ -504,6 +507,7 @@ static SchemaData schemaInfoToSchema(SchemaRegistryFormat.SchemaInfo info) { .user(info.getUser()) .type(convertToDomainType(info.getType())) .data(info.getSchema().toByteArray()) + .timestamp(info.getTimestamp()) .isDeleted(info.getDeleted()) .props(toMap(info.getPropsList())) .build(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/streamingdispatch/StreamingEntryReader.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/streamingdispatch/StreamingEntryReader.java index 12f5600c51a14..835269ca9d8c9 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/streamingdispatch/StreamingEntryReader.java +++ 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/streamingdispatch/StreamingEntryReader.java @@ -37,7 +37,7 @@ import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.bookkeeper.mledger.util.SafeRun; import org.apache.pulsar.broker.service.persistent.PersistentTopic; -import org.apache.pulsar.broker.transaction.buffer.exceptions.TransactionNotSealedException; +import org.apache.pulsar.broker.transaction.exception.buffer.TransactionBufferException; import org.apache.pulsar.client.impl.Backoff; /** @@ -197,7 +197,8 @@ private void internalReadEntryFailed(ManagedLedgerException exception, Object ct PositionImpl readPosition = pendingReadEntryRequest.position; pendingReadEntryRequest.retry++; long waitTimeMillis = readFailureBackoff.next(); - if (exception.getCause() instanceof TransactionNotSealedException) { + if (exception.getCause() instanceof TransactionBufferException.TransactionNotSealedException + || exception.getCause() instanceof ManagedLedgerException.OffloadReadHandleClosedException) { waitTimeMillis = 1; if (log.isDebugEnabled()) { log.debug("[{}] Error reading transaction entries : {}, - Retrying to read in {} seconds", diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/ClusterReplicationMetrics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/ClusterReplicationMetrics.java index 1086563085b7f..6718f074c67b2 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/ClusterReplicationMetrics.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/ClusterReplicationMetrics.java @@ -35,7 +35,8 @@ public class ClusterReplicationMetrics { public ClusterReplicationMetrics(String localCluster, boolean metricsEnabled) { metricsList = new ArrayList<>(); this.localCluster = localCluster; - metricsMap = new ConcurrentOpenHashMap<>(); + metricsMap = ConcurrentOpenHashMap.newBuilder() + .build(); this.metricsEnabled = metricsEnabled; } diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedConsumerStats.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedConsumerStats.java index 8b6bf7d5c9691..0a4bd317df5de 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedConsumerStats.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedConsumerStats.java @@ -28,6 +28,8 @@ public class AggregatedConsumerStats { public double msgRateOut; + public double msgAckRate; + public double msgThroughputOut; public long availablePermits; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStats.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStats.java index 8bacd0f582de2..1980af91b7b54 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStats.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedNamespaceStats.java @@ -33,6 +33,7 @@ public class AggregatedNamespaceStats { public double throughputIn; public double throughputOut; + public long messageAckRate; public long bytesInCounter; public long msgInCounter; public long bytesOutCounter; @@ -95,7 +96,7 @@ void updateStats(TopicStats stats) { stats.replicationStats.forEach((n, as) -> { AggregatedReplicationStats replStats = - replicationStats.computeIfAbsent(n, k -> new AggregatedReplicationStats()); + replicationStats.computeIfAbsent(n, k -> new AggregatedReplicationStats()); replStats.msgRateIn += as.msgRateIn; replStats.msgRateOut += as.msgRateOut; replStats.msgThroughputIn += as.msgThroughputIn; @@ -122,6 +123,7 @@ void updateStats(TopicStats stats) { consumerStats.blockedSubscriptionOnUnackedMsgs = v.blockedSubscriptionOnUnackedMsgs; consumerStats.msgRateRedeliver += v.msgRateRedeliver; consumerStats.unackedMessages += v.unackedMessages; + 
messageAckRate += v.msgAckRate; }); }); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedSubscriptionStats.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedSubscriptionStats.java index fb74daf419f59..ffb2f237e03ba 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedSubscriptionStats.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/AggregatedSubscriptionStats.java @@ -36,6 +36,8 @@ public class AggregatedSubscriptionStats { public double msgRateOut; + public double messageAckRate; + public double msgThroughputOut; public long msgDelayed; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/NamespaceStatsAggregator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/NamespaceStatsAggregator.java index 9df9ad4c7c624..3e67d92935020 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/NamespaceStatsAggregator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/NamespaceStatsAggregator.java @@ -19,13 +19,15 @@ package org.apache.pulsar.broker.stats.prometheus; import io.netty.util.concurrent.FastThreadLocal; +import java.util.HashMap; +import java.util.Map; import java.util.Optional; import java.util.concurrent.atomic.LongAdder; +import java.util.function.Function; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.client.LedgerHandle; import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.bookkeeper.mledger.impl.ManagedLedgerMBeanImpl; -import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentTopic; @@ -33,7 +35,6 @@ import org.apache.pulsar.common.policies.data.stats.ConsumerStatsImpl; import 
org.apache.pulsar.common.policies.data.stats.ReplicatorStatsImpl; import org.apache.pulsar.common.policies.data.stats.TopicStatsImpl; -import org.apache.pulsar.common.util.SimpleTextOutputStream; import org.apache.pulsar.compaction.CompactedTopicContext; import org.apache.pulsar.compaction.Compactor; import org.apache.pulsar.compaction.CompactorMXBean; @@ -41,77 +42,75 @@ @Slf4j public class NamespaceStatsAggregator { - private static FastThreadLocal localNamespaceStats = + private static final FastThreadLocal localNamespaceStats = new FastThreadLocal() { @Override - protected AggregatedNamespaceStats initialValue() throws Exception { + protected AggregatedNamespaceStats initialValue() { return new AggregatedNamespaceStats(); } }; - private static FastThreadLocal localTopicStats = new FastThreadLocal() { + private static final FastThreadLocal localTopicStats = new FastThreadLocal() { @Override - protected TopicStats initialValue() throws Exception { + protected TopicStats initialValue() { return new TopicStats(); } }; public static void generate(PulsarService pulsar, boolean includeTopicMetrics, boolean includeConsumerMetrics, - boolean includeProducerMetrics, boolean splitTopicAndPartitionIndexLabel, SimpleTextOutputStream stream) { + boolean includeProducerMetrics, boolean splitTopicAndPartitionIndexLabel, + PrometheusMetricStreams stream) { String cluster = pulsar.getConfiguration().getClusterName(); AggregatedNamespaceStats namespaceStats = localNamespaceStats.get(); - TopicStats.resetTypes(); TopicStats topicStats = localTopicStats.get(); + Optional compactorMXBean = getCompactorMXBean(pulsar); + LongAdder topicsCount = new LongAdder(); + Map localNamespaceTopicCount = new HashMap<>(); printDefaultBrokerStats(stream, cluster); - Optional compactorMXBean = getCompactorMXBean(pulsar); - LongAdder topicsCount = new LongAdder(); pulsar.getBrokerService().getMultiLayerTopicMap().forEach((namespace, bundlesMap) -> { namespaceStats.reset(); topicsCount.reset(); - 
bundlesMap.forEach((bundle, topicsMap) -> { - topicsMap.forEach((name, topic) -> { - getTopicStats(topic, topicStats, includeConsumerMetrics, includeProducerMetrics, - pulsar.getConfiguration().isExposePreciseBacklogInPrometheus(), - pulsar.getConfiguration().isExposeSubscriptionBacklogSizeInPrometheus(), - compactorMXBean - ); - - if (includeTopicMetrics) { - topicsCount.add(1); - TopicStats.printTopicStats(stream, cluster, namespace, name, topicStats, compactorMXBean, - splitTopicAndPartitionIndexLabel); - } else { - namespaceStats.updateStats(topicStats); - } - }); - }); + bundlesMap.forEach((bundle, topicsMap) -> topicsMap.forEach((name, topic) -> { + getTopicStats(topic, topicStats, includeConsumerMetrics, includeProducerMetrics, + pulsar.getConfiguration().isExposePreciseBacklogInPrometheus(), + pulsar.getConfiguration().isExposeSubscriptionBacklogSizeInPrometheus(), + compactorMXBean + ); + + if (includeTopicMetrics) { + topicsCount.add(1); + TopicStats.printTopicStats(stream, topicStats, compactorMXBean, cluster, namespace, name, + splitTopicAndPartitionIndexLabel); + } else { + namespaceStats.updateStats(topicStats); + } + })); if (!includeTopicMetrics) { - // Only include namespace level stats if we don't have the per-topic, otherwise we're going to report - // the same data twice, and it will make the aggregation difficult - printNamespaceStats(stream, cluster, namespace, namespaceStats); + // Only include namespace level stats if we don't have the per-topic, otherwise we're going to + // report the same data twice, and it will make the aggregation difficult + printNamespaceStats(stream, namespaceStats, cluster, namespace); } else { - printTopicsCountStats(stream, cluster, namespace, topicsCount); + localNamespaceTopicCount.put(namespace, topicsCount.sum()); } }); + + if (includeTopicMetrics) { + printTopicsCountStats(stream, localNamespaceTopicCount, cluster); + } } private static Optional getCompactorMXBean(PulsarService pulsar) { - Compactor compactor 
= null; - try { - compactor = pulsar.getCompactor(false); - } catch (PulsarServerException e) { - log.error("get compactor error", e); - } - return Optional.ofNullable(compactor).map(c -> c.getStats()); + Compactor compactor = pulsar.getNullableCompactor(); + return Optional.ofNullable(compactor).map(Compactor::getStats); } private static void getTopicStats(Topic topic, TopicStats stats, boolean includeConsumerMetrics, - boolean includeProducerMetrics, boolean getPreciseBacklog, boolean subscriptionBacklogSize, - Optional compactorMXBean) { + boolean includeProducerMetrics, boolean getPreciseBacklog, + boolean subscriptionBacklogSize, Optional compactorMXBean) { stats.reset(); if (topic instanceof PersistentTopic) { @@ -148,6 +147,7 @@ private static void getTopicStats(Topic topic, TopicStats stats, boolean include stats.msgOutCounter = tStatus.msgOutCounter; stats.bytesOutCounter = tStatus.bytesOutCounter; stats.averageMsgSize = tStatus.averageMsgSize; + stats.publishRateLimitedTimes = tStatus.publishRateLimitedTimes; stats.producersCount = 0; topic.getProducers().values().forEach(producer -> { @@ -195,6 +195,7 @@ private static void getTopicStats(Topic topic, TopicStats stats, boolean include subsStats.unackedMessages += cStats.unackedMessages; subsStats.msgRateRedeliver += cStats.msgRateRedeliver; subsStats.msgRateOut += cStats.msgRateOut; + subsStats.messageAckRate += cStats.messageAckRate; subsStats.msgThroughputOut += cStats.msgThroughputOut; subsStats.bytesOutCounter += cStats.bytesOutCounter; subsStats.msgOutCounter += cStats.msgOutCounter; @@ -220,6 +221,7 @@ private static void getTopicStats(Topic topic, TopicStats stats, boolean include consumerStats.unackedMessages = conStats.unackedMessages; consumerStats.msgRateRedeliver = conStats.msgRateRedeliver; consumerStats.msgRateOut = conStats.msgRateOut; + consumerStats.msgAckRate = conStats.messageAckRate; consumerStats.msgThroughputOut = conStats.msgThroughputOut; consumerStats.bytesOutCounter = 
conStats.bytesOutCounter; consumerStats.msgOutCounter = conStats.msgOutCounter; @@ -270,160 +272,174 @@ private static void getTopicStats(Topic topic, TopicStats stats, boolean include }); } - private static void printDefaultBrokerStats(SimpleTextOutputStream stream, String cluster) { + private static void printDefaultBrokerStats(PrometheusMetricStreams stream, String cluster) { // Print metrics with 0 values. This is necessary to have the available brokers being // reported in the brokers dashboard even if they don't have any topic or traffic - metric(stream, cluster, "pulsar_topics_count", 0); - metric(stream, cluster, "pulsar_subscriptions_count", 0); - metric(stream, cluster, "pulsar_producers_count", 0); - metric(stream, cluster, "pulsar_consumers_count", 0); - metric(stream, cluster, "pulsar_rate_in", 0); - metric(stream, cluster, "pulsar_rate_out", 0); - metric(stream, cluster, "pulsar_throughput_in", 0); - metric(stream, cluster, "pulsar_throughput_out", 0); - metric(stream, cluster, "pulsar_storage_size", 0); - metric(stream, cluster, "pulsar_storage_logical_size", 0); - metric(stream, cluster, "pulsar_storage_write_rate", 0); - metric(stream, cluster, "pulsar_storage_read_rate", 0); - metric(stream, cluster, "pulsar_msg_backlog", 0); + writeMetric(stream, "pulsar_topics_count", 0, cluster); + writeMetric(stream, "pulsar_subscriptions_count", 0, cluster); + writeMetric(stream, "pulsar_producers_count", 0, cluster); + writeMetric(stream, "pulsar_consumers_count", 0, cluster); + writeMetric(stream, "pulsar_rate_in", 0, cluster); + writeMetric(stream, "pulsar_rate_out", 0, cluster); + writeMetric(stream, "pulsar_throughput_in", 0, cluster); + writeMetric(stream, "pulsar_throughput_out", 0, cluster); + writeMetric(stream, "pulsar_storage_size", 0, cluster); + writeMetric(stream, "pulsar_storage_logical_size", 0, cluster); + writeMetric(stream, "pulsar_storage_write_rate", 0, cluster); + writeMetric(stream, "pulsar_storage_read_rate", 0, cluster); + 
writeMetric(stream, "pulsar_msg_backlog", 0, cluster); } - private static void printTopicsCountStats(SimpleTextOutputStream stream, String cluster, String namespace, - LongAdder topicsCount) { - metric(stream, cluster, namespace, "pulsar_topics_count", topicsCount.sum()); + private static void printTopicsCountStats(PrometheusMetricStreams stream, Map namespaceTopicsCount, + String cluster) { + namespaceTopicsCount.forEach( + (ns, topicCount) -> writeMetric(stream, "pulsar_topics_count", topicCount, cluster, ns) + ); } - private static void printNamespaceStats(SimpleTextOutputStream stream, String cluster, String namespace, - AggregatedNamespaceStats stats) { - metric(stream, cluster, namespace, "pulsar_topics_count", stats.topicsCount); - metric(stream, cluster, namespace, "pulsar_subscriptions_count", stats.subscriptionsCount); - metric(stream, cluster, namespace, "pulsar_producers_count", stats.producersCount); - metric(stream, cluster, namespace, "pulsar_consumers_count", stats.consumersCount); - - metric(stream, cluster, namespace, "pulsar_rate_in", stats.rateIn); - metric(stream, cluster, namespace, "pulsar_rate_out", stats.rateOut); - metric(stream, cluster, namespace, "pulsar_throughput_in", stats.throughputIn); - metric(stream, cluster, namespace, "pulsar_throughput_out", stats.throughputOut); - - metric(stream, cluster, namespace, "pulsar_in_bytes_total", stats.bytesInCounter); - metric(stream, cluster, namespace, "pulsar_in_messages_total", stats.msgInCounter); - metric(stream, cluster, namespace, "pulsar_out_bytes_total", stats.bytesOutCounter); - metric(stream, cluster, namespace, "pulsar_out_messages_total", stats.msgOutCounter); - - metric(stream, cluster, namespace, "pulsar_storage_size", stats.managedLedgerStats.storageSize); - metric(stream, cluster, namespace, "pulsar_storage_logical_size", stats.managedLedgerStats.storageLogicalSize); - metric(stream, cluster, namespace, "pulsar_storage_backlog_size", stats.managedLedgerStats.backlogSize); - 
metric(stream, cluster, namespace, "pulsar_storage_offloaded_size", - stats.managedLedgerStats.offloadedStorageUsed); - - metric(stream, cluster, namespace, "pulsar_storage_write_rate", stats.managedLedgerStats.storageWriteRate); - metric(stream, cluster, namespace, "pulsar_storage_read_rate", stats.managedLedgerStats.storageReadRate); - - metric(stream, cluster, namespace, "pulsar_subscription_delayed", stats.msgDelayed); - - metricWithRemoteCluster(stream, cluster, namespace, "pulsar_msg_backlog", "local", stats.msgBacklog); + private static void printNamespaceStats(PrometheusMetricStreams stream, AggregatedNamespaceStats stats, + String cluster, String namespace) { + writeMetric(stream, "pulsar_topics_count", stats.topicsCount, cluster, namespace); + writeMetric(stream, "pulsar_subscriptions_count", stats.subscriptionsCount, cluster, + namespace); + writeMetric(stream, "pulsar_producers_count", stats.producersCount, cluster, namespace); + writeMetric(stream, "pulsar_consumers_count", stats.consumersCount, cluster, namespace); + + writeMetric(stream, "pulsar_rate_in", stats.rateIn, cluster, namespace); + writeMetric(stream, "pulsar_rate_out", stats.rateOut, cluster, namespace); + writeMetric(stream, "pulsar_throughput_in", stats.throughputIn, cluster, namespace); + writeMetric(stream, "pulsar_throughput_out", stats.throughputOut, cluster, namespace); + writeMetric(stream, "pulsar_consumer_msg_ack_rate", stats.messageAckRate, cluster, namespace); + + writeMetric(stream, "pulsar_in_bytes_total", stats.bytesInCounter, cluster, namespace); + writeMetric(stream, "pulsar_in_messages_total", stats.msgInCounter, cluster, namespace); + writeMetric(stream, "pulsar_out_bytes_total", stats.bytesOutCounter, cluster, namespace); + writeMetric(stream, "pulsar_out_messages_total", stats.msgOutCounter, cluster, namespace); + + writeMetric(stream, "pulsar_storage_size", stats.managedLedgerStats.storageSize, cluster, + namespace); + writeMetric(stream, 
"pulsar_storage_logical_size", + stats.managedLedgerStats.storageLogicalSize, cluster, namespace); + writeMetric(stream, "pulsar_storage_backlog_size", stats.managedLedgerStats.backlogSize, cluster, + namespace); + writeMetric(stream, "pulsar_storage_offloaded_size", + stats.managedLedgerStats.offloadedStorageUsed, cluster, namespace); + + writeMetric(stream, "pulsar_storage_write_rate", stats.managedLedgerStats.storageWriteRate, + cluster, namespace); + writeMetric(stream, "pulsar_storage_read_rate", stats.managedLedgerStats.storageReadRate, + cluster, namespace); + + writeMetric(stream, "pulsar_subscription_delayed", stats.msgDelayed, cluster, namespace); + + writePulsarMsgBacklog(stream, stats.msgBacklog, cluster, namespace); stats.managedLedgerStats.storageWriteLatencyBuckets.refresh(); long[] latencyBuckets = stats.managedLedgerStats.storageWriteLatencyBuckets.getBuckets(); - metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_0_5", latencyBuckets[0]); - metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_1", latencyBuckets[1]); - metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_5", latencyBuckets[2]); - metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_10", latencyBuckets[3]); - metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_20", latencyBuckets[4]); - metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_50", latencyBuckets[5]); - metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_100", latencyBuckets[6]); - metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_200", latencyBuckets[7]); - metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_1000", latencyBuckets[8]); - metric(stream, cluster, namespace, "pulsar_storage_write_latency_overflow", latencyBuckets[9]); - metric(stream, cluster, namespace, "pulsar_storage_write_latency_count", - stats.managedLedgerStats.storageWriteLatencyBuckets.getCount()); - 
metric(stream, cluster, namespace, "pulsar_storage_write_latency_sum", - stats.managedLedgerStats.storageWriteLatencyBuckets.getSum()); + writeMetric(stream, "pulsar_storage_write_latency_le_0_5", latencyBuckets[0], cluster, namespace); + writeMetric(stream, "pulsar_storage_write_latency_le_1", latencyBuckets[1], cluster, namespace); + writeMetric(stream, "pulsar_storage_write_latency_le_5", latencyBuckets[2], cluster, namespace); + writeMetric(stream, "pulsar_storage_write_latency_le_10", latencyBuckets[3], cluster, namespace); + writeMetric(stream, "pulsar_storage_write_latency_le_20", latencyBuckets[4], cluster, namespace); + writeMetric(stream, "pulsar_storage_write_latency_le_50", latencyBuckets[5], cluster, namespace); + writeMetric(stream, "pulsar_storage_write_latency_le_100", latencyBuckets[6], cluster, namespace); + writeMetric(stream, "pulsar_storage_write_latency_le_200", latencyBuckets[7], cluster, namespace); + writeMetric(stream, "pulsar_storage_write_latency_le_1000", latencyBuckets[8], cluster, namespace); + writeMetric(stream, "pulsar_storage_write_latency_overflow", latencyBuckets[9], cluster, namespace); + writeMetric(stream, "pulsar_storage_write_latency_count", + stats.managedLedgerStats.storageWriteLatencyBuckets.getCount(), cluster, namespace); + writeMetric(stream, "pulsar_storage_write_latency_sum", + stats.managedLedgerStats.storageWriteLatencyBuckets.getSum(), cluster, namespace); stats.managedLedgerStats.storageLedgerWriteLatencyBuckets.refresh(); - long[] ledgerWritelatencyBuckets = stats.managedLedgerStats.storageLedgerWriteLatencyBuckets.getBuckets(); - metric(stream, cluster, namespace, "pulsar_storage_ledger_write_latency_le_0_5", ledgerWritelatencyBuckets[0]); - metric(stream, cluster, namespace, "pulsar_storage_ledger_write_latency_le_1", ledgerWritelatencyBuckets[1]); - metric(stream, cluster, namespace, "pulsar_storage_ledger_write_latency_le_5", ledgerWritelatencyBuckets[2]); - metric(stream, cluster, namespace, 
"pulsar_storage_ledger_write_latency_le_10", ledgerWritelatencyBuckets[3]); - metric(stream, cluster, namespace, "pulsar_storage_ledger_write_latency_le_20", ledgerWritelatencyBuckets[4]); - metric(stream, cluster, namespace, "pulsar_storage_ledger_write_latency_le_50", ledgerWritelatencyBuckets[5]); - metric(stream, cluster, namespace, "pulsar_storage_ledger_write_latency_le_100", ledgerWritelatencyBuckets[6]); - metric(stream, cluster, namespace, "pulsar_storage_ledger_write_latency_le_200", ledgerWritelatencyBuckets[7]); - metric(stream, cluster, namespace, "pulsar_storage_ledger_write_latency_le_1000", ledgerWritelatencyBuckets[8]); - metric(stream, cluster, namespace, "pulsar_storage_ledger_write_latency_overflow", - ledgerWritelatencyBuckets[9]); - metric(stream, cluster, namespace, "pulsar_storage_ledger_write_latency_count", - stats.managedLedgerStats.storageLedgerWriteLatencyBuckets.getCount()); - metric(stream, cluster, namespace, "pulsar_storage_ledger_write_latency_sum", - stats.managedLedgerStats.storageLedgerWriteLatencyBuckets.getSum()); + long[] ledgerWriteLatencyBuckets = stats.managedLedgerStats.storageLedgerWriteLatencyBuckets.getBuckets(); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_0_5", ledgerWriteLatencyBuckets[0], + cluster, namespace); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_1", ledgerWriteLatencyBuckets[1], + cluster, namespace); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_5", ledgerWriteLatencyBuckets[2], + cluster, namespace); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_10", ledgerWriteLatencyBuckets[3], + cluster, namespace); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_20", ledgerWriteLatencyBuckets[4], + cluster, namespace); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_50", ledgerWriteLatencyBuckets[5], + cluster, namespace); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_100", ledgerWriteLatencyBuckets[6], + 
cluster, namespace); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_200", ledgerWriteLatencyBuckets[7], + cluster, namespace); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_1000", ledgerWriteLatencyBuckets[8], + cluster, namespace); + writeMetric(stream, "pulsar_storage_ledger_write_latency_overflow", ledgerWriteLatencyBuckets[9], + cluster, namespace); + writeMetric(stream, "pulsar_storage_ledger_write_latency_count", + stats.managedLedgerStats.storageLedgerWriteLatencyBuckets.getCount(), cluster, namespace); + writeMetric(stream, "pulsar_storage_ledger_write_latency_sum", + stats.managedLedgerStats.storageLedgerWriteLatencyBuckets.getSum(), cluster, namespace); stats.managedLedgerStats.entrySizeBuckets.refresh(); long[] entrySizeBuckets = stats.managedLedgerStats.entrySizeBuckets.getBuckets(); - metric(stream, cluster, namespace, "pulsar_entry_size_le_128", entrySizeBuckets[0]); - metric(stream, cluster, namespace, "pulsar_entry_size_le_512", entrySizeBuckets[1]); - metric(stream, cluster, namespace, "pulsar_entry_size_le_1_kb", entrySizeBuckets[2]); - metric(stream, cluster, namespace, "pulsar_entry_size_le_2_kb", entrySizeBuckets[3]); - metric(stream, cluster, namespace, "pulsar_entry_size_le_4_kb", entrySizeBuckets[4]); - metric(stream, cluster, namespace, "pulsar_entry_size_le_16_kb", entrySizeBuckets[5]); - metric(stream, cluster, namespace, "pulsar_entry_size_le_100_kb", entrySizeBuckets[6]); - metric(stream, cluster, namespace, "pulsar_entry_size_le_1_mb", entrySizeBuckets[7]); - metric(stream, cluster, namespace, "pulsar_entry_size_le_overflow", entrySizeBuckets[8]); - metric(stream, cluster, namespace, "pulsar_entry_size_count", - stats.managedLedgerStats.entrySizeBuckets.getCount()); - metric(stream, cluster, namespace, "pulsar_entry_size_sum", - stats.managedLedgerStats.entrySizeBuckets.getSum()); - - if (!stats.replicationStats.isEmpty()) { - stats.replicationStats.forEach((remoteCluster, replStats) -> { - 
metricWithRemoteCluster(stream, cluster, namespace, "pulsar_replication_rate_in", remoteCluster, - replStats.msgRateIn); - metricWithRemoteCluster(stream, cluster, namespace, "pulsar_replication_rate_out", remoteCluster, - replStats.msgRateOut); - metricWithRemoteCluster(stream, cluster, namespace, "pulsar_replication_throughput_in", remoteCluster, - replStats.msgThroughputIn); - metricWithRemoteCluster(stream, cluster, namespace, "pulsar_replication_throughput_out", remoteCluster, - replStats.msgThroughputOut); - metricWithRemoteCluster(stream, cluster, namespace, "pulsar_replication_backlog", remoteCluster, - replStats.replicationBacklog); - metricWithRemoteCluster(stream, cluster, namespace, "pulsar_replication_connected_count", remoteCluster, - replStats.connectedCount); - metricWithRemoteCluster(stream, cluster, namespace, "pulsar_replication_rate_expired", remoteCluster, - replStats.msgRateExpired); - metricWithRemoteCluster(stream, cluster, namespace, "pulsar_replication_delay_in_seconds", - remoteCluster, replStats.replicationDelayInSeconds); - }); - } + writeMetric(stream, "pulsar_entry_size_le_128", entrySizeBuckets[0], cluster, namespace); + writeMetric(stream, "pulsar_entry_size_le_512", entrySizeBuckets[1], cluster, namespace); + writeMetric(stream, "pulsar_entry_size_le_1_kb", entrySizeBuckets[2], cluster, namespace); + writeMetric(stream, "pulsar_entry_size_le_2_kb", entrySizeBuckets[3], cluster, namespace); + writeMetric(stream, "pulsar_entry_size_le_4_kb", entrySizeBuckets[4], cluster, namespace); + writeMetric(stream, "pulsar_entry_size_le_16_kb", entrySizeBuckets[5], cluster, namespace); + writeMetric(stream, "pulsar_entry_size_le_100_kb", entrySizeBuckets[6], cluster, namespace); + writeMetric(stream, "pulsar_entry_size_le_1_mb", entrySizeBuckets[7], cluster, namespace); + writeMetric(stream, "pulsar_entry_size_le_overflow", entrySizeBuckets[8], cluster, namespace); + writeMetric(stream, "pulsar_entry_size_count", 
stats.managedLedgerStats.entrySizeBuckets.getCount(), + cluster, namespace); + writeMetric(stream, "pulsar_entry_size_sum", stats.managedLedgerStats.entrySizeBuckets.getSum(), + cluster, namespace); + + writeReplicationStat(stream, "pulsar_replication_rate_in", stats, + replStats -> replStats.msgRateIn, cluster, namespace); + writeReplicationStat(stream, "pulsar_replication_rate_out", stats, + replStats -> replStats.msgRateOut, cluster, namespace); + writeReplicationStat(stream, "pulsar_replication_throughput_in", stats, + replStats -> replStats.msgThroughputIn, cluster, namespace); + writeReplicationStat(stream, "pulsar_replication_throughput_out", stats, + replStats -> replStats.msgThroughputOut, cluster, namespace); + writeReplicationStat(stream, "pulsar_replication_backlog", stats, + replStats -> replStats.replicationBacklog, cluster, namespace); + writeReplicationStat(stream, "pulsar_replication_connected_count", stats, + replStats -> replStats.connectedCount, cluster, namespace); + writeReplicationStat(stream, "pulsar_replication_rate_expired", stats, + replStats -> replStats.msgRateExpired, cluster, namespace); + writeReplicationStat(stream, "pulsar_replication_delay_in_seconds", stats, + replStats -> replStats.replicationDelayInSeconds, cluster, namespace); } - private static void metric(SimpleTextOutputStream stream, String cluster, String name, - long value) { - TopicStats.metricType(stream, name); - stream.write(name) - .write("{cluster=\"").write(cluster).write("\"} ") - .write(value).write(' ').write(System.currentTimeMillis()) - .write('\n'); + private static void writePulsarMsgBacklog(PrometheusMetricStreams stream, Number value, + String cluster, String namespace) { + stream.writeSample("pulsar_msg_backlog", value, "cluster", cluster, "namespace", namespace, + "remote_cluster", + "local"); } - private static void metric(SimpleTextOutputStream stream, String cluster, String namespace, String name, - long value) { - TopicStats.metricType(stream, 
name); - stream.write(name).write("{cluster=\"").write(cluster).write("\",namespace=\"").write(namespace).write("\"} "); - stream.write(value).write(' ').write(System.currentTimeMillis()).write('\n'); + private static void writeMetric(PrometheusMetricStreams stream, String metricName, Number value, + String cluster) { + stream.writeSample(metricName, value, "cluster", cluster); } - private static void metric(SimpleTextOutputStream stream, String cluster, String namespace, String name, - double value) { - TopicStats.metricType(stream, name); - stream.write(name).write("{cluster=\"").write(cluster).write("\",namespace=\"").write(namespace).write("\"} "); - stream.write(value).write(' ').write(System.currentTimeMillis()).write('\n'); + private static void writeMetric(PrometheusMetricStreams stream, String metricName, Number value, String cluster, + String namespace) { + stream.writeSample(metricName, value, "cluster", cluster, "namespace", namespace); } - private static void metricWithRemoteCluster(SimpleTextOutputStream stream, String cluster, String namespace, - String name, String remoteCluster, double value) { - TopicStats.metricType(stream, name); - stream.write(name).write("{cluster=\"").write(cluster).write("\",namespace=\"").write(namespace); - stream.write("\",remote_cluster=\"").write(remoteCluster).write("\"} "); - stream.write(value).write(' ').write(System.currentTimeMillis()).write('\n'); + private static void writeReplicationStat(PrometheusMetricStreams stream, String metricName, + AggregatedNamespaceStats namespaceStats, + Function sampleValueFunction, + String cluster, String namespace) { + if (!namespaceStats.replicationStats.isEmpty()) { + namespaceStats.replicationStats.forEach((remoteCluster, replStats) -> + stream.writeSample(metricName, sampleValueFunction.apply(replStats), + "cluster", cluster, + "namespace", namespace, + "remote_cluster", remoteCluster) + ); + } } } diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricStreams.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricStreams.java new file mode 100644 index 0000000000000..6b6b972c175f0 --- /dev/null +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricStreams.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.stats.prometheus; + +import java.util.HashMap; +import java.util.Map; +import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; +import org.apache.pulsar.common.util.SimpleTextOutputStream; + +/** + * Helper class to ensure that metrics of the same name are grouped together under the same TYPE header when written. + * Those are the requirements of the + * Prometheus Exposition Format. + */ +public class PrometheusMetricStreams { + private final Map metricStreamMap = new HashMap<>(); + + /** + * Write the given metric and sample value to the stream. Will write #TYPE header if metric not seen before. + * @param metricName name of the metric. 
+ * @param value value of the sample + * @param labelsAndValuesArray varargs of label and label value + */ + void writeSample(String metricName, Number value, String... labelsAndValuesArray) { + SimpleTextOutputStream stream = initGaugeType(metricName); + stream.write(metricName).write('{'); + for (int i = 0; i < labelsAndValuesArray.length; i += 2) { + stream.write(labelsAndValuesArray[i]).write("=\"").write(labelsAndValuesArray[i + 1]).write('\"'); + if (i + 2 != labelsAndValuesArray.length) { + stream.write(','); + } + } + stream.write("} ").write(value).write(' ').write(System.currentTimeMillis()).write('\n'); + } + + /** + * Flush all the stored metrics to the supplied stream. + * @param stream the stream to write to. + */ + void flushAllToStream(SimpleTextOutputStream stream) { + metricStreamMap.values().forEach(s -> stream.write(s.getBuffer())); + } + + /** + * Release all the streams to clean up resources. + */ + void releaseAll() { + metricStreamMap.values().forEach(s -> s.getBuffer().release()); + metricStreamMap.clear(); + } + + private SimpleTextOutputStream initGaugeType(String metricName) { + return metricStreamMap.computeIfAbsent(metricName, s -> { + SimpleTextOutputStream stream = new SimpleTextOutputStream(PulsarByteBufAllocator.DEFAULT.directBuffer()); + stream.write("# TYPE ").write(metricName).write(" gauge\n"); + return stream; + }); + } +} diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsGenerator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsGenerator.java index 9d5e1c77c69a4..aa5822c826aa5 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsGenerator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsGenerator.java @@ -52,7 +52,8 @@ /** * Generate metrics aggregated at the namespace level and optionally at a topic level and formats them out * in a 
text format suitable to be consumed by Prometheus. - * Format specification can be found at {@link https://prometheus.io/docs/instrumenting/exposition_formats/} + * Format specification can be found at Exposition Formats */ public class PrometheusMetricsGenerator { @@ -86,38 +87,43 @@ public double get() { } public static void generate(PulsarService pulsar, boolean includeTopicMetrics, boolean includeConsumerMetrics, - boolean includeProducerMetrics, OutputStream out) throws IOException { + boolean includeProducerMetrics, OutputStream out) throws IOException { generate(pulsar, includeTopicMetrics, includeConsumerMetrics, includeProducerMetrics, false, out, null); } public static void generate(PulsarService pulsar, boolean includeTopicMetrics, boolean includeConsumerMetrics, - boolean includeProducerMetrics, boolean splitTopicAndPartitionIndexLabel, - OutputStream out) throws IOException { + boolean includeProducerMetrics, boolean splitTopicAndPartitionIndexLabel, + OutputStream out) throws IOException { generate(pulsar, includeTopicMetrics, includeConsumerMetrics, includeProducerMetrics, splitTopicAndPartitionIndexLabel, out, null); } public static void generate(PulsarService pulsar, boolean includeTopicMetrics, boolean includeConsumerMetrics, - boolean includeProducerMetrics, boolean splitTopicAndPartitionIndexLabel, OutputStream out, - List metricsProviders) - throws IOException { + boolean includeProducerMetrics, boolean splitTopicAndPartitionIndexLabel, + OutputStream out, + List metricsProviders) + throws IOException { ByteBuf buf = ByteBufAllocator.DEFAULT.heapBuffer(); + //Used in namespace/topic and transaction aggregators as share metric names + PrometheusMetricStreams metricStreams = new PrometheusMetricStreams(); try { SimpleTextOutputStream stream = new SimpleTextOutputStream(buf); generateSystemMetrics(stream, pulsar.getConfiguration().getClusterName()); NamespaceStatsAggregator.generate(pulsar, includeTopicMetrics, includeConsumerMetrics, - 
includeProducerMetrics, splitTopicAndPartitionIndexLabel, stream); + includeProducerMetrics, splitTopicAndPartitionIndexLabel, metricStreams); if (pulsar.getWorkerServiceOpt().isPresent()) { pulsar.getWorkerService().generateFunctionsStats(stream); } if (pulsar.getConfiguration().isTransactionCoordinatorEnabled()) { - TransactionAggregator.generate(pulsar, stream, includeTopicMetrics); + TransactionAggregator.generate(pulsar, metricStreams, includeTopicMetrics); } + metricStreams.flushAllToStream(stream); + generateBrokerBasicMetrics(pulsar, stream); generateManagedLedgerBookieClientMetrics(pulsar, stream); @@ -129,6 +135,8 @@ public static void generate(PulsarService pulsar, boolean includeTopicMetrics, b } out.write(buf.array(), buf.arrayOffset(), buf.readableBytes()); } finally { + //release all the metrics buffers + metricStreams.releaseAll(); buf.release(); } } @@ -142,17 +150,17 @@ private static void generateBrokerBasicMetrics(PulsarService pulsar, SimpleTextO if (pulsar.getConfiguration().isExposeManagedLedgerMetricsInPrometheus()) { // generate managedLedger metrics parseMetricsToPrometheusMetrics(new ManagedLedgerMetrics(pulsar).generate(), - clusterName, Collector.Type.GAUGE, stream); + clusterName, Collector.Type.GAUGE, stream); } if (pulsar.getConfiguration().isExposeManagedCursorMetricsInPrometheus()) { // generate managedCursor metrics parseMetricsToPrometheusMetrics(new ManagedCursorMetrics(pulsar).generate(), - clusterName, Collector.Type.GAUGE, stream); + clusterName, Collector.Type.GAUGE, stream); } parseMetricsToPrometheusMetrics(Collections.singletonList(pulsar.getBrokerService() - .getPulsarStats().getBrokerOperabilityMetrics().generateConnectionMetrics()), + .getPulsarStats().getBrokerOperabilityMetrics().generateConnectionMetrics()), clusterName, Collector.Type.GAUGE, stream); // generate loadBalance metrics @@ -242,7 +250,9 @@ private static void generateSystemMetrics(SimpleTextOutputStream stream, String for (int i = 0; i < 
metricFamily.samples.size(); i++) { Sample sample = metricFamily.samples.get(i); stream.write(sample.name); - stream.write("{cluster=\"").write(cluster).write('"'); + if (!sample.labelNames.contains("cluster")) { + stream.write("{cluster=\"").write(cluster).write('"'); + } for (int j = 0; j < sample.labelNames.size(); j++) { String labelValue = sample.labelValues.get(j); if (labelValue != null) { @@ -265,17 +275,17 @@ private static void generateSystemMetrics(SimpleTextOutputStream stream, String static String getTypeStr(Collector.Type type) { switch (type) { - case COUNTER: - return "counter"; - case GAUGE: - return "gauge"; - case SUMMARY : - return "summary"; - case HISTOGRAM: - return "histogram"; - case UNTYPED: - default: - return "untyped"; + case COUNTER: + return "counter"; + case GAUGE: + return "gauge"; + case SUMMARY: + return "summary"; + case HISTOGRAM: + return "histogram"; + case UNTYPED: + default: + return "untyped"; } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsServlet.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsServlet.java index 145f7a744cd9e..3c9498cd11399 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsServlet.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricsServlet.java @@ -73,12 +73,11 @@ protected void doGet(HttpServletRequest request, HttpServletResponse response) HttpServletResponse res = (HttpServletResponse) context.getResponse(); try { res.setStatus(HttpStatus.OK_200); - res.setContentType("text/plain"); + res.setContentType("text/plain;charset=utf-8"); PrometheusMetricsGenerator.generate(pulsar, shouldExportTopicMetrics, shouldExportConsumerMetrics, shouldExportProducerMetrics, splitTopicAndPartitionLabel, res.getOutputStream(), metricsProviders); context.complete(); - } catch (Exception e) { log.error("Failed to generate 
prometheus stats", e); res.setStatus(HttpStatus.INTERNAL_SERVER_ERROR_500); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/TopicStats.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/TopicStats.java index bfa427e5e3a7c..9ac2f04eae488 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/TopicStats.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/TopicStats.java @@ -23,12 +23,12 @@ import java.util.Map; import java.util.Optional; import org.apache.bookkeeper.mledger.util.StatsBuckets; -import org.apache.pulsar.common.util.SimpleTextOutputStream; +import org.apache.commons.lang3.ArrayUtils; +import org.apache.pulsar.broker.service.Consumer; import org.apache.pulsar.compaction.CompactionRecord; import org.apache.pulsar.compaction.CompactorMXBean; class TopicStats { - int subscriptionsCount; int producersCount; int consumersCount; @@ -43,6 +43,7 @@ class TopicStats { double averageMsgSize; public long msgBacklog; + long publishRateLimitedTimes; long backlogQuotaLimit; long backlogQuotaLimitTime; @@ -53,9 +54,6 @@ class TopicStats { Map subscriptionStats = new HashMap<>(); Map producerStats = new HashMap<>(); - // Used for tracking duplicate TYPE definitions - static Map metricWithTypeDefinition = new HashMap<>(); - // For compaction long compactionRemovedEventCount; long compactionSucceedCount; @@ -82,6 +80,7 @@ public void reset() { managedLedgerStats.reset(); msgBacklog = 0; + publishRateLimitedTimes = 0L; backlogQuotaLimit = 0; backlogQuotaLimitTime = -1; @@ -100,367 +99,340 @@ public void reset() { compactionLatencyBuckets.reset(); } - static void resetTypes() { - metricWithTypeDefinition.clear(); - } - - static void printTopicStats(SimpleTextOutputStream stream, String cluster, String namespace, String topic, - TopicStats stats, Optional compactorMXBean, - boolean splitTopicAndPartitionIndexLabel) { - metric(stream, cluster, namespace, 
topic, "pulsar_subscriptions_count", stats.subscriptionsCount, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_producers_count", stats.producersCount, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_consumers_count", stats.consumersCount, - splitTopicAndPartitionIndexLabel); - - metric(stream, cluster, namespace, topic, "pulsar_rate_in", stats.rateIn, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_rate_out", stats.rateOut, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_throughput_in", stats.throughputIn, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_throughput_out", stats.throughputOut, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_average_msg_size", stats.averageMsgSize, - splitTopicAndPartitionIndexLabel); - - metric(stream, cluster, namespace, topic, "pulsar_storage_size", stats.managedLedgerStats.storageSize, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_logical_size", - stats.managedLedgerStats.storageLogicalSize, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_msg_backlog", stats.msgBacklog, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_backlog_size", - stats.managedLedgerStats.backlogSize, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_offloaded_size", stats.managedLedgerStats - .offloadedStorageUsed, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_backlog_quota_limit", stats.backlogQuotaLimit, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_backlog_quota_limit_time", - stats.backlogQuotaLimitTime, 
splitTopicAndPartitionIndexLabel); + public static void printTopicStats(PrometheusMetricStreams stream, TopicStats stats, + Optional compactorMXBean, String cluster, String namespace, + String topic, boolean splitTopicAndPartitionIndexLabel) { + writeMetric(stream, "pulsar_subscriptions_count", stats.subscriptionsCount, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_producers_count", stats.producersCount, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_consumers_count", stats.consumersCount, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + + writeMetric(stream, "pulsar_rate_in", stats.rateIn, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_rate_out", stats.rateOut, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_throughput_in", stats.throughputIn, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_throughput_out", stats.throughputOut, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_average_msg_size", stats.averageMsgSize, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + + writeMetric(stream, "pulsar_storage_size", stats.managedLedgerStats.storageSize, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_logical_size", + stats.managedLedgerStats.storageLogicalSize, cluster, namespace, topic, + splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_msg_backlog", stats.msgBacklog, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_write_rate", stats.managedLedgerStats.storageWriteRate, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_read_rate", stats.managedLedgerStats.storageReadRate, + cluster, 
namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_backlog_size", stats.managedLedgerStats.backlogSize, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_publish_rate_limit_times", stats.publishRateLimitedTimes, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_offloaded_size", stats.managedLedgerStats + .offloadedStorageUsed, cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_backlog_quota_limit", stats.backlogQuotaLimit, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_backlog_quota_limit_time", stats.backlogQuotaLimitTime, + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); long[] latencyBuckets = stats.managedLedgerStats.storageWriteLatencyBuckets.getBuckets(); - metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_le_0_5", latencyBuckets[0], - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_le_1", latencyBuckets[1], - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_le_5", latencyBuckets[2], - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_le_10", latencyBuckets[3], + writeMetric(stream, "pulsar_storage_write_latency_le_0_5", + latencyBuckets[0], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_write_latency_le_1", + latencyBuckets[1], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_write_latency_le_5", + latencyBuckets[2], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_write_latency_le_10", + latencyBuckets[3], cluster, namespace, topic, 
splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_write_latency_le_20", + latencyBuckets[4], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_write_latency_le_50", + latencyBuckets[5], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_write_latency_le_100", + latencyBuckets[6], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_write_latency_le_200", + latencyBuckets[7], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_write_latency_le_1000", + latencyBuckets[8], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_write_latency_overflow", + latencyBuckets[9], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_write_latency_count", + stats.managedLedgerStats.storageWriteLatencyBuckets.getCount(), + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_write_latency_sum", + stats.managedLedgerStats.storageWriteLatencyBuckets.getSum(), cluster, namespace, topic, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_le_20", latencyBuckets[4], - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_le_50", latencyBuckets[5], - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_le_100", latencyBuckets[6], - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_le_200", latencyBuckets[7], - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_le_1000", latencyBuckets[8], - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, 
namespace, topic, "pulsar_storage_write_latency_overflow", latencyBuckets[9], - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_count", - stats.managedLedgerStats.storageWriteLatencyBuckets.getCount(), splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_sum", - stats.managedLedgerStats.storageWriteLatencyBuckets.getSum(), splitTopicAndPartitionIndexLabel); long[] ledgerWriteLatencyBuckets = stats.managedLedgerStats.storageLedgerWriteLatencyBuckets.getBuckets(); - metric(stream, cluster, namespace, topic, "pulsar_storage_ledger_write_latency_le_0_5", - ledgerWriteLatencyBuckets[0], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_ledger_write_latency_le_1", - ledgerWriteLatencyBuckets[1], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_ledger_write_latency_le_5", - ledgerWriteLatencyBuckets[2], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_ledger_write_latency_le_10", - ledgerWriteLatencyBuckets[3], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_ledger_write_latency_le_20", - ledgerWriteLatencyBuckets[4], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_ledger_write_latency_le_50", - ledgerWriteLatencyBuckets[5], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_ledger_write_latency_le_100", - ledgerWriteLatencyBuckets[6], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_ledger_write_latency_le_200", - ledgerWriteLatencyBuckets[7], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_ledger_write_latency_le_1000", - ledgerWriteLatencyBuckets[8], splitTopicAndPartitionIndexLabel); - 
metric(stream, cluster, namespace, topic, "pulsar_storage_ledger_write_latency_overflow", - ledgerWriteLatencyBuckets[9], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_ledger_write_latency_count", + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_0_5", + ledgerWriteLatencyBuckets[0], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_1", + ledgerWriteLatencyBuckets[1], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_5", + ledgerWriteLatencyBuckets[2], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_10", + ledgerWriteLatencyBuckets[3], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_20", + ledgerWriteLatencyBuckets[4], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_50", + ledgerWriteLatencyBuckets[5], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_100", + ledgerWriteLatencyBuckets[6], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_200", + ledgerWriteLatencyBuckets[7], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_1000", + ledgerWriteLatencyBuckets[8], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_ledger_write_latency_overflow", + ledgerWriteLatencyBuckets[9], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_ledger_write_latency_count", stats.managedLedgerStats.storageLedgerWriteLatencyBuckets.getCount(), 
- splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_storage_ledger_write_latency_sum", + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_storage_ledger_write_latency_sum", stats.managedLedgerStats.storageLedgerWriteLatencyBuckets.getSum(), - splitTopicAndPartitionIndexLabel); + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); long[] entrySizeBuckets = stats.managedLedgerStats.entrySizeBuckets.getBuckets(); - metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_128", entrySizeBuckets[0], + writeMetric(stream, "pulsar_entry_size_le_128", entrySizeBuckets[0], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_512", entrySizeBuckets[1], + writeMetric(stream, "pulsar_entry_size_le_512", entrySizeBuckets[1], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_1_kb", entrySizeBuckets[2], + writeMetric(stream, "pulsar_entry_size_le_1_kb", entrySizeBuckets[2], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_2_kb", entrySizeBuckets[3], + writeMetric(stream, "pulsar_entry_size_le_2_kb", entrySizeBuckets[3], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_4_kb", entrySizeBuckets[4], + writeMetric(stream, "pulsar_entry_size_le_4_kb", entrySizeBuckets[4], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_16_kb", entrySizeBuckets[5], + writeMetric(stream, "pulsar_entry_size_le_16_kb", entrySizeBuckets[5], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_100_kb", entrySizeBuckets[6], + writeMetric(stream, 
"pulsar_entry_size_le_100_kb", entrySizeBuckets[6], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_1_mb", entrySizeBuckets[7], + writeMetric(stream, "pulsar_entry_size_le_1_mb", entrySizeBuckets[7], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_overflow", entrySizeBuckets[8], + writeMetric(stream, "pulsar_entry_size_le_overflow", entrySizeBuckets[8], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_entry_size_count", - stats.managedLedgerStats.entrySizeBuckets.getCount(), splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_entry_size_sum", - stats.managedLedgerStats.entrySizeBuckets.getSum(), splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_entry_size_count", stats.managedLedgerStats.entrySizeBuckets.getCount(), + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_entry_size_sum", stats.managedLedgerStats.entrySizeBuckets.getSum(), + cluster, namespace, topic, splitTopicAndPartitionIndexLabel); stats.producerStats.forEach((p, producerStats) -> { - metric(stream, cluster, namespace, topic, p, producerStats.producerId, "pulsar_producer_msg_rate_in", - producerStats.msgRateIn, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, p, producerStats.producerId, "pulsar_producer_msg_throughput_in", - producerStats.msgThroughputIn, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, p, producerStats.producerId, "pulsar_producer_msg_average_Size", - producerStats.averageMsgSize, splitTopicAndPartitionIndexLabel); + writeProducerMetric(stream, "pulsar_producer_msg_rate_in", producerStats.msgRateIn, + cluster, namespace, topic, p, producerStats.producerId, splitTopicAndPartitionIndexLabel); + 
writeProducerMetric(stream, "pulsar_producer_msg_throughput_in", producerStats.msgThroughputIn, + cluster, namespace, topic, p, producerStats.producerId, splitTopicAndPartitionIndexLabel); + writeProducerMetric(stream, "pulsar_producer_msg_average_Size", producerStats.averageMsgSize, + cluster, namespace, topic, p, producerStats.producerId, splitTopicAndPartitionIndexLabel); }); - stats.subscriptionStats.forEach((n, subsStats) -> { - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_back_log", - subsStats.msgBacklog, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_back_log_no_delayed", - subsStats.msgBacklogNoDelayed, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_delayed", - subsStats.msgDelayed, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_msg_rate_redeliver", - subsStats.msgRateRedeliver, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_unacked_messages", - subsStats.unackedMessages, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_blocked_on_unacked_messages", - subsStats.blockedSubscriptionOnUnackedMsgs ? 
1 : 0, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_msg_rate_out", - subsStats.msgRateOut, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_msg_throughput_out", - subsStats.msgThroughputOut, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_out_bytes_total", - subsStats.bytesOutCounter, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_out_messages_total", - subsStats.msgOutCounter, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_last_expire_timestamp", - subsStats.lastExpireTimestamp, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_last_acked_timestamp", - subsStats.lastAckedTimestamp, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_last_consumed_flow_timestamp", - subsStats.lastConsumedFlowTimestamp, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_last_consumed_timestamp", - subsStats.lastConsumedTimestamp, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_last_mark_delete_advanced_timestamp", - subsStats.lastMarkDeleteAdvancedTimestamp, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_msg_rate_expired", - subsStats.msgRateExpired, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, "pulsar_subscription_total_msg_expired", - subsStats.totalMsgExpired, splitTopicAndPartitionIndexLabel); + stats.subscriptionStats.forEach((sub, subsStats) -> { + writeSubscriptionMetric(stream, "pulsar_subscription_back_log", subsStats.msgBacklog, + cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + 
writeSubscriptionMetric(stream, "pulsar_subscription_back_log_no_delayed", + subsStats.msgBacklogNoDelayed, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_delayed", + subsStats.msgDelayed, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_msg_rate_redeliver", + subsStats.msgRateRedeliver, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_unacked_messages", + subsStats.unackedMessages, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_blocked_on_unacked_messages", + subsStats.blockedSubscriptionOnUnackedMsgs ? 1 : 0, cluster, namespace, topic, sub, + splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_msg_rate_out", + subsStats.msgRateOut, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_msg_ack_rate", + subsStats.messageAckRate, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_msg_throughput_out", + subsStats.msgThroughputOut, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_out_bytes_total", + subsStats.bytesOutCounter, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_out_messages_total", + subsStats.msgOutCounter, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_last_expire_timestamp", + subsStats.lastExpireTimestamp, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_last_acked_timestamp", + subsStats.lastAckedTimestamp, cluster, 
namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_last_consumed_flow_timestamp", + subsStats.lastConsumedFlowTimestamp, cluster, namespace, topic, sub, + splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_last_consumed_timestamp", + subsStats.lastConsumedTimestamp, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_last_mark_delete_advanced_timestamp", + subsStats.lastMarkDeleteAdvancedTimestamp, cluster, namespace, topic, sub, + splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_msg_rate_expired", + subsStats.msgRateExpired, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + writeSubscriptionMetric(stream, "pulsar_subscription_total_msg_expired", + subsStats.totalMsgExpired, cluster, namespace, topic, sub, splitTopicAndPartitionIndexLabel); + subsStats.consumerStat.forEach((c, consumerStats) -> { - metric(stream, cluster, namespace, topic, n, c.consumerName(), c.consumerId(), - "pulsar_consumer_msg_rate_redeliver", consumerStats.msgRateRedeliver, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, c.consumerName(), c.consumerId(), - "pulsar_consumer_unacked_messages", consumerStats.unackedMessages, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, c.consumerName(), c.consumerId(), - "pulsar_consumer_blocked_on_unacked_messages", + writeConsumerMetric(stream, "pulsar_consumer_msg_rate_redeliver", consumerStats.msgRateRedeliver, + cluster, namespace, topic, sub, c, splitTopicAndPartitionIndexLabel); + writeConsumerMetric(stream, "pulsar_consumer_unacked_messages", consumerStats.unackedMessages, + cluster, namespace, topic, sub, c, splitTopicAndPartitionIndexLabel); + writeConsumerMetric(stream, "pulsar_consumer_blocked_on_unacked_messages", 
consumerStats.blockedSubscriptionOnUnackedMsgs ? 1 : 0, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, c.consumerName(), c.consumerId(), - "pulsar_consumer_msg_rate_out", consumerStats.msgRateOut, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, c.consumerName(), c.consumerId(), - "pulsar_consumer_msg_throughput_out", consumerStats.msgThroughputOut, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, c.consumerName(), c.consumerId(), - "pulsar_consumer_available_permits", consumerStats.availablePermits, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, c.consumerName(), c.consumerId(), - "pulsar_out_bytes_total", consumerStats.bytesOutCounter, - splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, n, c.consumerName(), c.consumerId(), - "pulsar_out_messages_total", consumerStats.msgOutCounter, - splitTopicAndPartitionIndexLabel); + cluster, namespace, topic, sub, c, splitTopicAndPartitionIndexLabel); + writeConsumerMetric(stream, "pulsar_consumer_msg_rate_out", consumerStats.msgRateOut, + cluster, namespace, topic, sub, c, splitTopicAndPartitionIndexLabel); + + writeConsumerMetric(stream, "pulsar_consumer_msg_ack_rate", consumerStats.msgAckRate, + cluster, namespace, topic, sub, c, splitTopicAndPartitionIndexLabel); + + writeConsumerMetric(stream, "pulsar_consumer_msg_throughput_out", consumerStats.msgThroughputOut, + cluster, namespace, topic, sub, c, splitTopicAndPartitionIndexLabel); + writeConsumerMetric(stream, "pulsar_consumer_available_permits", consumerStats.availablePermits, + cluster, namespace, topic, sub, c, splitTopicAndPartitionIndexLabel); + writeConsumerMetric(stream, "pulsar_out_bytes_total", consumerStats.bytesOutCounter, + cluster, namespace, topic, sub, c, splitTopicAndPartitionIndexLabel); + writeConsumerMetric(stream, "pulsar_out_messages_total", consumerStats.msgOutCounter, + 
cluster, namespace, topic, sub, c, splitTopicAndPartitionIndexLabel); }); }); if (!stats.replicationStats.isEmpty()) { stats.replicationStats.forEach((remoteCluster, replStats) -> { - metricWithRemoteCluster(stream, cluster, namespace, topic, "pulsar_replication_rate_in", remoteCluster, - replStats.msgRateIn, splitTopicAndPartitionIndexLabel); - metricWithRemoteCluster(stream, cluster, namespace, topic, "pulsar_replication_rate_out", remoteCluster, - replStats.msgRateOut, splitTopicAndPartitionIndexLabel); - metricWithRemoteCluster(stream, cluster, namespace, topic, "pulsar_replication_throughput_in", - remoteCluster, replStats.msgThroughputIn, splitTopicAndPartitionIndexLabel); - metricWithRemoteCluster(stream, cluster, namespace, topic, "pulsar_replication_throughput_out", - remoteCluster, replStats.msgThroughputOut, splitTopicAndPartitionIndexLabel); - metricWithRemoteCluster(stream, cluster, namespace, topic, "pulsar_replication_backlog", remoteCluster, - replStats.replicationBacklog, splitTopicAndPartitionIndexLabel); - metricWithRemoteCluster(stream, cluster, namespace, topic, "pulsar_replication_connected_count", - remoteCluster, replStats.connectedCount, splitTopicAndPartitionIndexLabel); - metricWithRemoteCluster(stream, cluster, namespace, topic, "pulsar_replication_rate_expired", - remoteCluster, replStats.msgRateExpired, splitTopicAndPartitionIndexLabel); - metricWithRemoteCluster(stream, cluster, namespace, topic, "pulsar_replication_delay_in_seconds", - remoteCluster, replStats.replicationDelayInSeconds, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_replication_rate_in", replStats.msgRateIn, + cluster, namespace, topic, remoteCluster, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_replication_rate_out", replStats.msgRateOut, + cluster, namespace, topic, remoteCluster, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_replication_throughput_in", replStats.msgThroughputIn, + cluster, namespace, topic, 
remoteCluster, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_replication_throughput_out", replStats.msgThroughputOut, + cluster, namespace, topic, remoteCluster, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_replication_backlog", replStats.replicationBacklog, + cluster, namespace, topic, remoteCluster, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_replication_connected_count", replStats.connectedCount, + cluster, namespace, topic, remoteCluster, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_replication_rate_expired", replStats.msgRateExpired, + cluster, namespace, topic, remoteCluster, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_replication_delay_in_seconds", replStats.replicationDelayInSeconds, + cluster, namespace, topic, remoteCluster, splitTopicAndPartitionIndexLabel); }); } - metric(stream, cluster, namespace, topic, "pulsar_in_bytes_total", stats.bytesInCounter, + writeMetric(stream, "pulsar_in_bytes_total", stats.bytesInCounter, cluster, namespace, topic, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_in_messages_total", stats.msgInCounter, + writeMetric(stream, "pulsar_in_messages_total", stats.msgInCounter, cluster, namespace, topic, splitTopicAndPartitionIndexLabel); // Compaction boolean hasCompaction = compactorMXBean.flatMap(mxBean -> mxBean.getCompactionRecordForTopic(topic)) - .map(__ -> true).orElse(false); + .isPresent(); if (hasCompaction) { - metric(stream, cluster, namespace, topic, "pulsar_compaction_removed_event_count", - stats.compactionRemovedEventCount, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_succeed_count", - stats.compactionSucceedCount, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_failed_count", - stats.compactionFailedCount, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, 
namespace, topic, "pulsar_compaction_duration_time_in_mills", - stats.compactionDurationTimeInMills, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_read_throughput", - stats.compactionReadThroughput, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_write_throughput", - stats.compactionWriteThroughput, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_compacted_entries_count", - stats.compactionCompactedEntriesCount, splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_compacted_entries_size", - stats.compactionCompactedEntriesSize, splitTopicAndPartitionIndexLabel); - long[] compactionLatencyBuckets = stats.compactionLatencyBuckets.getBuckets(); - metric(stream, cluster, namespace, topic, "pulsar_compaction_latency_le_0_5", - compactionLatencyBuckets[0], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_latency_le_1", - compactionLatencyBuckets[1], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_latency_le_5", - compactionLatencyBuckets[2], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_latency_le_10", - compactionLatencyBuckets[3], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_latency_le_20", - compactionLatencyBuckets[4], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_latency_le_50", - compactionLatencyBuckets[5], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_latency_le_100", - compactionLatencyBuckets[6], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_latency_le_200", - compactionLatencyBuckets[7], 
splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_latency_le_1000", - compactionLatencyBuckets[8], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_latency_overflow", - compactionLatencyBuckets[9], splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_latency_sum", - stats.compactionLatencyBuckets.getSum(), splitTopicAndPartitionIndexLabel); - metric(stream, cluster, namespace, topic, "pulsar_compaction_latency_count", - stats.compactionLatencyBuckets.getCount(), splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_removed_event_count", + stats.compactionRemovedEventCount, cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_succeed_count", + stats.compactionSucceedCount, cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_failed_count", + stats.compactionFailedCount, cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_duration_time_in_mills", + stats.compactionDurationTimeInMills, cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_read_throughput", + stats.compactionReadThroughput, cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_write_throughput", + stats.compactionWriteThroughput, cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_compacted_entries_count", + stats.compactionCompactedEntriesCount, cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_compacted_entries_size", + stats.compactionCompactedEntriesSize, cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + + long[] compactionBuckets = stats.compactionLatencyBuckets.getBuckets(); 
+ writeMetric(stream, "pulsar_compaction_latency_le_0_5", + compactionBuckets[0], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_latency_le_1", + compactionBuckets[1], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_latency_le_5", + compactionBuckets[2], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_latency_le_10", + compactionBuckets[3], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_latency_le_20", + compactionBuckets[4], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_latency_le_50", + compactionBuckets[5], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_latency_le_100", + compactionBuckets[6], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_latency_le_200", + compactionBuckets[7], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_latency_le_1000", + compactionBuckets[8], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_latency_overflow", + compactionBuckets[9], cluster, namespace, topic, splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_latency_sum", + stats.compactionLatencyBuckets.getSum(), cluster, namespace, topic, + splitTopicAndPartitionIndexLabel); + writeMetric(stream, "pulsar_compaction_latency_count", + stats.compactionLatencyBuckets.getCount(), cluster, namespace, topic, + splitTopicAndPartitionIndexLabel); } } - static void metricType(SimpleTextOutputStream stream, String name) { - - if (!metricWithTypeDefinition.containsKey(name)) { - metricWithTypeDefinition.put(name, "gauge"); - stream.write("# TYPE ").write(name).write(" gauge\n"); - 
} - + private static void writeMetric(PrometheusMetricStreams stream, String metricName, Number value, String cluster, + String namespace, String topic, boolean splitTopicAndPartitionIndexLabel) { + writeTopicMetric(stream, metricName, value, cluster, namespace, topic, splitTopicAndPartitionIndexLabel); } - private static void metric(SimpleTextOutputStream stream, String cluster, String namespace, String topic, - String name, double value, boolean splitTopicAndPartitionIndexLabel) { - metricType(stream, name); - appendRequiredLabels(stream, cluster, namespace, topic, name, splitTopicAndPartitionIndexLabel).write("\"} "); - stream.write(value); - appendEndings(stream); + private static void writeMetric(PrometheusMetricStreams stream, String metricName, Number value, String cluster, + String namespace, String topic, String remoteCluster, + boolean splitTopicAndPartitionIndexLabel) { + writeTopicMetric(stream, metricName, value, cluster, namespace, topic, splitTopicAndPartitionIndexLabel, + "remote_cluster", remoteCluster); } - private static void metric(SimpleTextOutputStream stream, String cluster, String namespace, String topic, - String subscription, String name, long value, boolean splitTopicAndPartitionIndexLabel) { - metricType(stream, name); - appendRequiredLabels(stream, cluster, namespace, topic, name, splitTopicAndPartitionIndexLabel) - .write("\",subscription=\"").write(subscription).write("\"} "); - stream.write(value); - appendEndings(stream); + private static void writeProducerMetric(PrometheusMetricStreams stream, String metricName, Number value, + String cluster, String namespace, String topic, String producer, + long producerId, boolean splitTopicAndPartitionIndexLabel) { + writeTopicMetric(stream, metricName, value, cluster, namespace, topic, splitTopicAndPartitionIndexLabel, + "producer_name", producer, "producer_id", String.valueOf(producerId)); } - private static void metric(SimpleTextOutputStream stream, String cluster, String namespace, String 
topic, - String producerName, long produceId, String name, double value, boolean splitTopicAndPartitionIndexLabel) { - metricType(stream, name); - appendRequiredLabels(stream, cluster, namespace, topic, name, splitTopicAndPartitionIndexLabel) - .write("\",producer_name=\"").write(producerName) - .write("\",producer_id=\"").write(produceId).write("\"} "); - stream.write(value); - appendEndings(stream); - } - - private static void metric(SimpleTextOutputStream stream, String cluster, String namespace, String topic, - String subscription, String name, double value, boolean splitTopicAndPartitionIndexLabel) { - metricType(stream, name); - appendRequiredLabels(stream, cluster, namespace, topic, name, splitTopicAndPartitionIndexLabel) - .write("\",subscription=\"").write(subscription).write("\"} "); - stream.write(value); - appendEndings(stream); - } - private static void metric(SimpleTextOutputStream stream, String cluster, String namespace, String topic, - String subscription, String consumerName, long consumerId, String name, long value, - boolean splitTopicAndPartitionIndexLabel) { - metricType(stream, name); - appendRequiredLabels(stream, cluster, namespace, topic, name, splitTopicAndPartitionIndexLabel) - .write("\",subscription=\"").write(subscription) - .write("\",consumer_name=\"").write(consumerName).write("\",consumer_id=\"").write(consumerId) - .write("\"} "); - stream.write(value); - appendEndings(stream); + private static void writeSubscriptionMetric(PrometheusMetricStreams stream, String metricName, Number value, + String cluster, String namespace, String topic, String subscription, + boolean splitTopicAndPartitionIndexLabel) { + writeTopicMetric(stream, metricName, value, cluster, namespace, topic, splitTopicAndPartitionIndexLabel, + "subscription", subscription); } - private static void metric(SimpleTextOutputStream stream, String cluster, String namespace, String topic, - String subscription, String consumerName, long consumerId, String name, double 
value, - boolean splitTopicAndPartitionIndexLabel) { - metricType(stream, name); - appendRequiredLabels(stream, cluster, namespace, topic, name, splitTopicAndPartitionIndexLabel) - .write("\",subscription=\"").write(subscription) - .write("\",consumer_name=\"").write(consumerName).write("\",consumer_id=\"") - .write(consumerId).write("\"} "); - stream.write(value); - appendEndings(stream); + private static void writeConsumerMetric(PrometheusMetricStreams stream, String metricName, Number value, + String cluster, String namespace, String topic, String subscription, + Consumer consumer, boolean splitTopicAndPartitionIndexLabel) { + writeTopicMetric(stream, metricName, value, cluster, namespace, topic, splitTopicAndPartitionIndexLabel, + "subscription", subscription, "consumer_name", consumer.consumerName(), + "consumer_id", String.valueOf(consumer.consumerId())); } - private static void metricWithRemoteCluster(SimpleTextOutputStream stream, String cluster, String namespace, - String topic, String name, String remoteCluster, double value, boolean splitTopicAndPartitionIndexLabel) { - metricType(stream, name); - appendRequiredLabels(stream, cluster, namespace, topic, name, splitTopicAndPartitionIndexLabel) - .write("\",remote_cluster=\"").write(remoteCluster).write("\"} "); - stream.write(value); - appendEndings(stream); - } - - private static SimpleTextOutputStream appendRequiredLabels(SimpleTextOutputStream stream, String cluster, - String namespace, String topic, String name, boolean splitTopicAndPartitionIndexLabel) { - stream.write(name).write("{cluster=\"").write(cluster).write("\",namespace=\"").write(namespace); + static void writeTopicMetric(PrometheusMetricStreams stream, String metricName, Number value, String cluster, + String namespace, String topic, boolean splitTopicAndPartitionIndexLabel, + String... extraLabelsAndValues) { + String[] labelsAndValues = new String[splitTopicAndPartitionIndexLabel ? 
8 : 6]; + labelsAndValues[0] = "cluster"; + labelsAndValues[1] = cluster; + labelsAndValues[2] = "namespace"; + labelsAndValues[3] = namespace; + labelsAndValues[4] = "topic"; if (splitTopicAndPartitionIndexLabel) { int index = topic.indexOf(PARTITIONED_TOPIC_SUFFIX); if (index > 0) { - stream.write("\",topic=\"").write(topic.substring(0, index)).write("\",partition=\"") - .write(topic.substring(index + PARTITIONED_TOPIC_SUFFIX.length())); + labelsAndValues[5] = topic.substring(0, index); + labelsAndValues[6] = "partition"; + labelsAndValues[7] = topic.substring(index + PARTITIONED_TOPIC_SUFFIX.length()); } else { - stream.write("\",topic=\"").write(topic).write("\",partition=\"").write("-1"); + labelsAndValues[5] = topic; + labelsAndValues[6] = "partition"; + labelsAndValues[7] = "-1"; } } else { - stream.write("\",topic=\"").write(topic); + labelsAndValues[5] = topic; } - return stream; - } - - private static void appendEndings(SimpleTextOutputStream stream) { - stream.write(' ').write(System.currentTimeMillis()).write('\n'); + String[] labels = ArrayUtils.addAll(labelsAndValues, extraLabelsAndValues); + stream.writeSample(metricName, value, labels); } -} +} \ No newline at end of file diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/TransactionAggregator.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/TransactionAggregator.java index 142ec48ae956a..8c58b516333f5 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/TransactionAggregator.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/TransactionAggregator.java @@ -28,7 +28,6 @@ import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.util.SimpleTextOutputStream; import org.apache.pulsar.transaction.coordinator.impl.MLTransactionLogImpl; import 
org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStore; import org.apache.pulsar.transaction.coordinator.impl.TransactionMetadataStoreStats; @@ -39,7 +38,7 @@ public class TransactionAggregator { private static final FastThreadLocal localTransactionCoordinatorStats = new FastThreadLocal() { @Override - protected AggregatedTransactionCoordinatorStats initialValue() throws Exception { + protected AggregatedTransactionCoordinatorStats initialValue() { return new AggregatedTransactionCoordinatorStats(); } }; @@ -47,27 +46,27 @@ protected AggregatedTransactionCoordinatorStats initialValue() throws Exception private static final FastThreadLocal localManageLedgerStats = new FastThreadLocal() { @Override - protected ManagedLedgerStats initialValue() throws Exception { + protected ManagedLedgerStats initialValue() { return new ManagedLedgerStats(); } }; - public static void generate(PulsarService pulsar, SimpleTextOutputStream stream, boolean includeTopicMetrics) { + public static void generate(PulsarService pulsar, PrometheusMetricStreams stream, boolean includeTopicMetrics) { String cluster = pulsar.getConfiguration().getClusterName(); if (includeTopicMetrics) { - pulsar.getBrokerService().getMultiLayerTopicMap().forEach((namespace, bundlesMap) -> { - bundlesMap.forEach((bundle, topicsMap) -> { - topicsMap.forEach((name, topic) -> { + pulsar.getBrokerService().getMultiLayerTopicMap().forEach((namespace, bundlesMap) -> + bundlesMap.forEach((bundle, topicsMap) -> topicsMap.forEach((name, topic) -> { if (topic instanceof PersistentTopic) { topic.getSubscriptions().values().forEach(subscription -> { try { localManageLedgerStats.get().reset(); - if (!checkTopicIsEventsNames(TopicName.get(subscription.getTopic().getName()))) { - ManagedLedger managedLedger = - ((PersistentSubscription) subscription) - .getPendingAckManageLedger().get(); + if (!checkTopicIsEventsNames(TopicName.get(subscription.getTopic().getName())) + && subscription instanceof 
PersistentSubscription + && ((PersistentSubscription) subscription).checkIfPendingAckStoreInit()) { + ManagedLedger managedLedger = ((PersistentSubscription) subscription) + .getPendingAckManageLedger().get(); generateManageLedgerStats(managedLedger, stream, cluster, namespace, name, subscription.getName()); } @@ -76,9 +75,7 @@ public static void generate(PulsarService pulsar, SimpleTextOutputStream stream, } }); } - }); - }); - }); + }))); } AggregatedTransactionCoordinatorStats transactionCoordinatorStats = localTransactionCoordinatorStats.get(); @@ -107,18 +104,18 @@ public static void generate(PulsarService pulsar, SimpleTextOutputStream stream, localManageLedgerStats.get().reset(); if (transactionMetadataStore instanceof MLTransactionMetadataStore) { - ManagedLedger managedLedger = - ((MLTransactionMetadataStore) transactionMetadataStore).getManagedLedger(); + ManagedLedger managedLedger = + ((MLTransactionMetadataStore) transactionMetadataStore).getManagedLedger(); generateManageLedgerStats(managedLedger, stream, cluster, NamespaceName.SYSTEM_NAMESPACE.toString(), MLTransactionLogImpl.TRANSACTION_LOG_PREFIX + transactionCoordinatorID.getId(), MLTransactionLogImpl.TRANSACTION_SUBSCRIPTION_NAME); } - }); + }); } - private static void generateManageLedgerStats(ManagedLedger managedLedger, SimpleTextOutputStream stream, + private static void generateManageLedgerStats(ManagedLedger managedLedger, PrometheusMetricStreams stream, String cluster, String namespace, String topic, String subscription) { ManagedLedgerStats managedLedgerStats = localManageLedgerStats.get(); ManagedLedgerMBeanImpl mlStats = (ManagedLedgerMBeanImpl) managedLedger.getStats(); @@ -140,165 +137,149 @@ private static void generateManageLedgerStats(ManagedLedger managedLedger, Simpl managedLedgerStats.storageWriteRate = mlStats.getAddEntryMessagesRate(); managedLedgerStats.storageReadRate = mlStats.getReadEntriesRate(); - printManageLedgerStats(stream, cluster, namespace, topic, - subscription, 
managedLedgerStats); - } - - private static void metric(SimpleTextOutputStream stream, String cluster, String name, - double value, long coordinatorId) { - stream.write("# TYPE ").write(name).write(" gauge\n") - .write(name) - .write("{cluster=\"").write(cluster) - .write("\",coordinator_id=\"").write(coordinatorId).write("\"} ") - .write(value).write(' ').write(System.currentTimeMillis()) - .write('\n'); - } - - private static void metrics(SimpleTextOutputStream stream, String cluster, String namespace, - String topic, String subscription, String name, long value) { - stream.write(name).write("{cluster=\"").write(cluster).write("\", namespace=\"").write(namespace) - .write("\",topic=\"").write(topic).write("\",subscription=\"").write(subscription).write("\"} "); - stream.write(value).write(' ').write(System.currentTimeMillis()).write('\n'); - } - - private static void metrics(SimpleTextOutputStream stream, String cluster, String namespace, - String topic, String subscription, String name, double value) { - stream.write(name).write("{cluster=\"").write(cluster).write("\", namespace=\"").write(namespace) - .write("\",topic=\"").write(topic).write("\",subscription=\"").write(subscription).write("\"} "); - stream.write(value).write(' ').write(System.currentTimeMillis()).write('\n'); + printManageLedgerStats(stream, cluster, namespace, topic, subscription, managedLedgerStats); } - private static void printManageLedgerStats(SimpleTextOutputStream stream, String cluster, String namespace, + private static void printManageLedgerStats(PrometheusMetricStreams stream, String cluster, String namespace, String topic, String subscription, ManagedLedgerStats stats) { - metrics(stream, cluster, namespace, topic, subscription, - "pulsar_storage_size", stats.storageSize); - metrics(stream, cluster, namespace, topic, subscription, - "pulsar_storage_logical_size", stats.storageLogicalSize); - metrics(stream, cluster, namespace, topic, subscription, - "pulsar_storage_backlog_size", 
stats.backlogSize); - metrics(stream, cluster, namespace, topic, subscription, - "pulsar_storage_offloaded_size", stats.offloadedStorageUsed); + writeMetric(stream, "pulsar_storage_size", stats.storageSize, cluster, namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_logical_size", stats.storageLogicalSize, cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_backlog_size", stats.backlogSize, cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_offloaded_size", stats.offloadedStorageUsed, cluster, namespace, topic, + subscription); - metrics(stream, cluster, namespace, topic, subscription, - "pulsar_storage_write_rate", stats.storageWriteRate); - metrics(stream, cluster, namespace, topic, subscription, - "pulsar_storage_read_rate", stats.storageReadRate); + writeMetric(stream, "pulsar_storage_write_rate", stats.storageWriteRate, cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_read_rate", stats.storageReadRate, cluster, namespace, topic, + subscription); stats.storageWriteLatencyBuckets.refresh(); long[] latencyBuckets = stats.storageWriteLatencyBuckets.getBuckets(); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_write_latency_le_0_5", latencyBuckets[0]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_write_latency_le_1", latencyBuckets[1]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_write_latency_le_5", latencyBuckets[2]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_write_latency_le_10", latencyBuckets[3]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_write_latency_le_20", latencyBuckets[4]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_write_latency_le_50", latencyBuckets[5]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_write_latency_le_100", 
latencyBuckets[6]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_write_latency_le_200", latencyBuckets[7]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_write_latency_le_1000", latencyBuckets[8]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_write_latency_overflow", latencyBuckets[9]); - metric(stream, cluster, namespace, topic, subscription, "pulsar_storage_write_latency_count", - stats.storageWriteLatencyBuckets.getCount()); - metric(stream, cluster, namespace, topic, subscription, "pulsar_storage_write_latency_sum", - stats.storageWriteLatencyBuckets.getSum()); + writeMetric(stream, "pulsar_storage_write_latency_le_0_5", latencyBuckets[0], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_write_latency_le_1", latencyBuckets[1], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_write_latency_le_5", latencyBuckets[2], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_write_latency_le_10", latencyBuckets[3], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_write_latency_le_20", latencyBuckets[4], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_write_latency_le_50", latencyBuckets[5], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_write_latency_le_100", latencyBuckets[6], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_write_latency_le_200", latencyBuckets[7], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_write_latency_le_1000", latencyBuckets[8], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_write_latency_overflow", latencyBuckets[9], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_storage_write_latency_count", 
stats.storageWriteLatencyBuckets.getCount(), + cluster, namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_write_latency_sum", stats.storageWriteLatencyBuckets.getSum(), cluster, + namespace, topic, subscription); stats.storageLedgerWriteLatencyBuckets.refresh(); - long[] ledgerWritelatencyBuckets = stats.storageLedgerWriteLatencyBuckets.getBuckets(); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_ledger_write_latency_le_0_5", ledgerWritelatencyBuckets[0]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_ledger_write_latency_le_1", ledgerWritelatencyBuckets[1]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_ledger_write_latency_le_5", ledgerWritelatencyBuckets[2]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_ledger_write_latency_le_10", ledgerWritelatencyBuckets[3]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_ledger_write_latency_le_20", ledgerWritelatencyBuckets[4]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_ledger_write_latency_le_50", ledgerWritelatencyBuckets[5]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_ledger_write_latency_le_100", ledgerWritelatencyBuckets[6]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_ledger_write_latency_le_200", ledgerWritelatencyBuckets[7]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_storage_ledger_write_latency_le_1000", ledgerWritelatencyBuckets[8]); - metric(stream, cluster, namespace, topic, subscription, "pulsar_storage_ledger_write_latency_overflow", - ledgerWritelatencyBuckets[9]); - metric(stream, cluster, namespace, topic, subscription, "pulsar_storage_ledger_write_latency_count", - stats.storageLedgerWriteLatencyBuckets.getCount()); - metric(stream, cluster, namespace, topic, subscription, "pulsar_storage_ledger_write_latency_sum", - 
stats.storageLedgerWriteLatencyBuckets.getSum()); + long[] ledgerWriteLatencyBuckets = stats.storageLedgerWriteLatencyBuckets.getBuckets(); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_0_5", ledgerWriteLatencyBuckets[0], cluster, + namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_1", ledgerWriteLatencyBuckets[1], cluster, + namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_5", ledgerWriteLatencyBuckets[2], cluster, + namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_10", ledgerWriteLatencyBuckets[3], cluster, + namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_20", ledgerWriteLatencyBuckets[4], cluster, + namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_50", ledgerWriteLatencyBuckets[5], cluster, + namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_100", ledgerWriteLatencyBuckets[6], cluster, + namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_200", ledgerWriteLatencyBuckets[7], cluster, + namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_ledger_write_latency_le_1000", ledgerWriteLatencyBuckets[8], cluster, + namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_ledger_write_latency_overflow", ledgerWriteLatencyBuckets[9], cluster, + namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_ledger_write_latency_count", + stats.storageLedgerWriteLatencyBuckets.getCount(), cluster, namespace, topic, subscription); + writeMetric(stream, "pulsar_storage_ledger_write_latency_sum", + stats.storageLedgerWriteLatencyBuckets.getSum(), cluster, namespace, topic, subscription); stats.entrySizeBuckets.refresh(); long[] entrySizeBuckets = stats.entrySizeBuckets.getBuckets(); - metric(stream, cluster, 
namespace, topic, subscription, "pulsar_entry_size_le_128", entrySizeBuckets[0]); - metric(stream, cluster, namespace, topic, subscription, "pulsar_entry_size_le_512", entrySizeBuckets[1]); - metric(stream, cluster, namespace, topic, subscription, "pulsar_entry_size_le_1_kb", entrySizeBuckets[2]); - metric(stream, cluster, namespace, topic, subscription, "pulsar_entry_size_le_2_kb", entrySizeBuckets[3]); - metric(stream, cluster, namespace, topic, subscription, "pulsar_entry_size_le_4_kb", entrySizeBuckets[4]); - metric(stream, cluster, namespace, topic, subscription, "pulsar_entry_size_le_16_kb", entrySizeBuckets[5]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_entry_size_le_100_kb", entrySizeBuckets[6]); - metric(stream, cluster, namespace, topic, subscription, "pulsar_entry_size_le_1_mb", entrySizeBuckets[7]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_entry_size_le_overflow", entrySizeBuckets[8]); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_entry_size_count", stats.entrySizeBuckets.getCount()); - metric(stream, cluster, namespace, topic, subscription, - "pulsar_entry_size_sum", stats.entrySizeBuckets.getSum()); - } - - private static void metric(SimpleTextOutputStream stream, String cluster, - String namespace, String topic, String subscription, - String name, long value) { - stream.write(name).write("{cluster=\"").write(cluster).write("\",namespace=\"").write(namespace) - .write("\",topic=\"").write(topic).write("\",subscription=\"").write(subscription).write("\"} "); - stream.write(value).write(' ').write(System.currentTimeMillis()).write('\n'); + writeMetric(stream, "pulsar_entry_size_le_128", entrySizeBuckets[0], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_entry_size_le_512", entrySizeBuckets[1], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_entry_size_le_1_kb", entrySizeBuckets[2], cluster, namespace, topic, + subscription); + 
writeMetric(stream, "pulsar_entry_size_le_2_kb", entrySizeBuckets[3], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_entry_size_le_4_kb", entrySizeBuckets[4], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_entry_size_le_16_kb", entrySizeBuckets[5], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_entry_size_le_100_kb", entrySizeBuckets[6], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_entry_size_le_1_mb", entrySizeBuckets[7], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_entry_size_le_overflow", entrySizeBuckets[8], cluster, namespace, topic, + subscription); + writeMetric(stream, "pulsar_entry_size_count", stats.entrySizeBuckets.getCount(), cluster, namespace, + topic, subscription); + writeMetric(stream, "pulsar_entry_size_sum", stats.entrySizeBuckets.getSum(), cluster, namespace, topic, + subscription); } - static void printTransactionCoordinatorStats(SimpleTextOutputStream stream, String cluster, + static void printTransactionCoordinatorStats(PrometheusMetricStreams stream, String cluster, AggregatedTransactionCoordinatorStats stats, long coordinatorId) { - metric(stream, cluster, "pulsar_txn_active_count", - stats.actives, coordinatorId); - metric(stream, cluster, "pulsar_txn_committed_count", - stats.committedCount, coordinatorId); - metric(stream, cluster, "pulsar_txn_aborted_count", - stats.abortedCount, coordinatorId); - metric(stream, cluster, "pulsar_txn_created_count", - stats.createdCount, coordinatorId); - metric(stream, cluster, "pulsar_txn_timeout_count", - stats.timeoutCount, coordinatorId); - metric(stream, cluster, "pulsar_txn_append_log_count", - stats.appendLogCount, coordinatorId); + writeMetric(stream, "pulsar_txn_active_count", stats.actives, cluster, + coordinatorId); + writeMetric(stream, "pulsar_txn_committed_count", stats.committedCount, cluster, + coordinatorId); + writeMetric(stream, 
"pulsar_txn_aborted_count", stats.abortedCount, cluster, + coordinatorId); + writeMetric(stream, "pulsar_txn_created_count", stats.createdCount, cluster, + coordinatorId); + writeMetric(stream, "pulsar_txn_timeout_count", stats.timeoutCount, cluster, + coordinatorId); + writeMetric(stream, "pulsar_txn_append_log_count", stats.appendLogCount, cluster, + coordinatorId); long[] latencyBuckets = stats.executionLatency; - metric(stream, cluster, "pulsar_txn_execution_latency_le_10", latencyBuckets[0], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_20", latencyBuckets[1], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_50", latencyBuckets[2], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_100", latencyBuckets[3], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_500", latencyBuckets[4], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_1000", latencyBuckets[5], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_5000", latencyBuckets[6], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_15000", latencyBuckets[7], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_30000", latencyBuckets[8], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_60000", latencyBuckets[9], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_300000", - latencyBuckets[10], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_1500000", - latencyBuckets[11], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_3000000", - latencyBuckets[12], coordinatorId); - metric(stream, cluster, "pulsar_txn_execution_latency_le_overflow", - latencyBuckets[13], coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_10", latencyBuckets[0], cluster, coordinatorId); + writeMetric(stream, 
"pulsar_txn_execution_latency_le_20", latencyBuckets[1], cluster, coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_50", latencyBuckets[2], cluster, coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_100", latencyBuckets[3], cluster, coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_500", latencyBuckets[4], cluster, coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_1000", latencyBuckets[5], cluster, coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_5000", latencyBuckets[6], cluster, coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_15000", latencyBuckets[7], cluster, coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_30000", latencyBuckets[8], cluster, coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_60000", latencyBuckets[9], cluster, coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_300000", latencyBuckets[10], cluster, + coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_1500000", latencyBuckets[11], cluster, + coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_3000000", latencyBuckets[12], cluster, + coordinatorId); + writeMetric(stream, "pulsar_txn_execution_latency_le_overflow", latencyBuckets[13], cluster, + coordinatorId); + } + + private static void writeMetric(PrometheusMetricStreams stream, String metricName, double value, String cluster, + long coordinatorId) { + stream.writeSample(metricName, value, "cluster", cluster, "coordinator_id", String.valueOf(coordinatorId)); + } + + private static void writeMetric(PrometheusMetricStreams stream, String metricName, Number value, String cluster, + String namespace, String topic, String subscription) { + stream.writeSample(metricName, value, "cluster", cluster, "namespace", namespace, "topic", topic, + "subscription", subscription); } } diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/PrometheusMetricsProvider.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/PrometheusMetricsProvider.java index dfb27f4c92c27..0e59286861a40 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/PrometheusMetricsProvider.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/PrometheusMetricsProvider.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.stats.prometheus.metrics; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.annotations.VisibleForTesting; import io.netty.util.concurrent.DefaultThreadFactory; import io.prometheus.client.Collector; @@ -95,9 +96,8 @@ public void start(Configuration conf) { DEFAULT_PROMETHEUS_STATS_LATENCY_ROLLOVER_SECONDS); cluster = conf.getString(CLUSTER_NAME, DEFAULT_CLUSTER_NAME); - executor.scheduleAtFixedRate(() -> { - rotateLatencyCollection(); - }, 1, latencyRolloverSeconds, TimeUnit.SECONDS); + executor.scheduleAtFixedRate(catchingAndLoggingThrowables(this::rotateLatencyCollection), + 1, latencyRolloverSeconds, TimeUnit.SECONDS); } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/PrometheusTextFormatUtil.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/PrometheusTextFormatUtil.java index 7550096c2b584..8f704b11e764c 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/PrometheusTextFormatUtil.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/metrics/PrometheusTextFormatUtil.java @@ -18,13 +18,8 @@ */ package org.apache.pulsar.broker.stats.prometheus.metrics; -import io.prometheus.client.Collector; -import io.prometheus.client.Collector.MetricFamilySamples; -import io.prometheus.client.Collector.MetricFamilySamples.Sample; 
-import io.prometheus.client.CollectorRegistry; import java.io.IOException; import java.io.Writer; -import java.util.Enumeration; import org.apache.bookkeeper.stats.Counter; /** @@ -140,31 +135,4 @@ private static void writeSum(Writer w, DataSketchesOpStatsLogger opStat, String .append(success.toString()).append("\"} ") .append(Double.toString(opStat.getSum(success))).append('\n'); } - - public static void writeMetricsCollectedByPrometheusClient(Writer w, CollectorRegistry registry) - throws IOException { - Enumeration metricFamilySamples = registry.metricFamilySamples(); - while (metricFamilySamples.hasMoreElements()) { - MetricFamilySamples metricFamily = metricFamilySamples.nextElement(); - - for (int i = 0; i < metricFamily.samples.size(); i++) { - Sample sample = metricFamily.samples.get(i); - w.write(sample.name); - w.write('{'); - for (int j = 0; j < sample.labelNames.size(); j++) { - if (j != 0) { - w.write(", "); - } - w.write(sample.labelNames.get(j)); - w.write("=\""); - w.write(sample.labelValues.get(j)); - w.write('"'); - } - - w.write("} "); - w.write(Collector.doubleToGoString(sample.value)); - w.write('\n'); - } - } - } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/SystemTopicClient.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/SystemTopicClient.java index e838a694233a2..33bfc59156768 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/SystemTopicClient.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/SystemTopicClient.java @@ -21,12 +21,9 @@ import java.io.IOException; import java.util.List; import java.util.concurrent.CompletableFuture; -import org.apache.commons.lang3.StringUtils; -import org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckStore; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.PulsarClientException; -import 
org.apache.pulsar.common.events.EventsTopicNames; import org.apache.pulsar.common.naming.TopicName; /** @@ -187,22 +184,4 @@ interface Reader { */ SystemTopicClient getSystemTopic(); } - - static boolean isSystemTopic(TopicName topicName) { - TopicName nonePartitionedTopicName = TopicName.get(topicName.getPartitionedTopicName()); - - // event topic - if (EventsTopicNames.checkTopicIsEventsNames(nonePartitionedTopicName)) { - return true; - } - - String localName = nonePartitionedTopicName.getLocalName(); - // transaction pending ack topic - if (StringUtils.endsWith(localName, MLPendingAckStore.PENDING_ACK_STORE_SUFFIX)) { - return true; - } - - return false; - } - } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/TransactionBufferSystemTopicClient.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/TransactionBufferSystemTopicClient.java index 807bb9d174bfe..aaab858ab1ee2 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/TransactionBufferSystemTopicClient.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/systopic/TransactionBufferSystemTopicClient.java @@ -128,10 +128,17 @@ public void close() throws IOException { @Override public CompletableFuture closeAsync() { - return producer.closeAsync().thenCompose(v -> { + CompletableFuture completableFuture = new CompletableFuture<>(); + producer.closeAsync().whenComplete((v, e) -> { + // if close fail, also need remove the producer transactionBufferSystemTopicClient.removeWriter(this); - return CompletableFuture.completedFuture(null); + if (e != null) { + completableFuture.completeExceptionally(e); + return; + } + completableFuture.complete(null); }); + return completableFuture; } @Override @@ -179,10 +186,17 @@ public void close() throws IOException { @Override public CompletableFuture closeAsync() { - return reader.closeAsync().thenCompose(v -> { + CompletableFuture completableFuture = new CompletableFuture<>(); + 
reader.closeAsync().whenComplete((v, e) -> { + // if close fail, also need remove the reader transactionBufferSystemTopicClient.removeReader(this); - return CompletableFuture.completedFuture(null); + if (e != null) { + completableFuture.completeExceptionally(e); + return; + } + completableFuture.complete(null); }); + return completableFuture; } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferReader.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferReader.java index 4547de00dc8df..54dafd55410a1 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferReader.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferReader.java @@ -21,7 +21,8 @@ import com.google.common.annotations.Beta; import java.util.List; import java.util.concurrent.CompletableFuture; -import org.apache.pulsar.broker.transaction.buffer.exceptions.EndOfTransactionException; +import org.apache.pulsar.broker.transaction.exception.buffer.TransactionBufferException; + /** * A reader to read entries of a given transaction from transaction buffer. @@ -38,7 +39,7 @@ public interface TransactionBufferReader extends AutoCloseable { * * @param numEntries the number of entries to read from transaction buffer. * @return a future represents the result of the read operations. - * @throws EndOfTransactionException if reaching end of the transaction and no + * @throws TransactionBufferException.EndOfTransactionException if reaching end of the transaction and no * more entries to return. 
*/ CompletableFuture> readNext(int numEntries); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/TransactionMeta.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/TransactionMeta.java index 9afc05e039640..347ab74564780 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/TransactionMeta.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/TransactionMeta.java @@ -22,7 +22,7 @@ import java.util.SortedMap; import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.mledger.Position; -import org.apache.pulsar.broker.transaction.buffer.exceptions.TransactionStatusException; +import org.apache.pulsar.broker.transaction.exception.buffer.TransactionBufferException; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.transaction.coordinator.proto.TxnStatus; @@ -57,9 +57,9 @@ public interface TransactionMeta { * Return messages number in one transaction. * * @return the number of transaction messages - * @throws TransactionStatusException + * @throws TransactionBufferException.TransactionStatusException */ - int numMessageInTxn() throws TransactionStatusException; + int numMessageInTxn() throws TransactionBufferException.TransactionStatusException; /** * Return the committed ledger id at data ledger. diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionNotSealedException.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionNotSealedException.java deleted file mode 100644 index c732c8856a3be..0000000000000 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionNotSealedException.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.pulsar.broker.transaction.buffer.exceptions; - -/** - * Exception is thrown when opening a reader on a transaction that is not sealed yet. - */ -public class TransactionNotSealedException extends TransactionBufferException { - - private static final long serialVersionUID = 0L; - - public TransactionNotSealedException(String message) { - super(message); - } -} diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionSealedException.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionSealedException.java deleted file mode 100644 index 13fdec5cd7769..0000000000000 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionSealedException.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.pulsar.broker.transaction.buffer.exceptions; - -/** - * Exception thrown if a transaction is already sealed. - * - *

If a transaction is sealed, no more entries should be appended to this transaction. - */ -public class TransactionSealedException extends TransactionBufferException { - - private static final long serialVersionUID = 5366602873819540477L; - - public TransactionSealedException(String message) { - super(message); - } -} diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/InMemTransactionBuffer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/InMemTransactionBuffer.java index 2c32d0b68b1e8..98d572cc0827f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/InMemTransactionBuffer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/InMemTransactionBuffer.java @@ -36,10 +36,7 @@ import org.apache.pulsar.broker.transaction.buffer.TransactionBuffer; import org.apache.pulsar.broker.transaction.buffer.TransactionBufferReader; import org.apache.pulsar.broker.transaction.buffer.TransactionMeta; -import org.apache.pulsar.broker.transaction.buffer.exceptions.TransactionNotFoundException; -import org.apache.pulsar.broker.transaction.buffer.exceptions.TransactionNotSealedException; -import org.apache.pulsar.broker.transaction.buffer.exceptions.TransactionSealedException; -import org.apache.pulsar.broker.transaction.buffer.exceptions.TransactionStatusException; +import org.apache.pulsar.broker.transaction.exception.buffer.TransactionBufferException; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.common.policies.data.TransactionBufferStats; import org.apache.pulsar.common.policies.data.TransactionInBufferStats; @@ -86,7 +83,7 @@ public int numEntries() { } @Override - public int numMessageInTxn() throws TransactionStatusException { + public int numMessageInTxn() throws TransactionBufferException.TransactionStatusException { return -1; } @@ -125,7 +122,7 @@ public CompletableFuture committingTxn() { public 
CompletableFuture commitTxn(long committedAtLedgerId, long committedAtEntryId) { try { return CompletableFuture.completedFuture(commitAt(committedAtLedgerId, committedAtEntryId)); - } catch (TransactionStatusException e) { + } catch (TransactionBufferException.TransactionStatusException e) { return FutureUtil.failedFuture(e); } } @@ -134,23 +131,23 @@ public CompletableFuture commitTxn(long committedAtLedgerId, lo public CompletableFuture abortTxn() { try { return CompletableFuture.completedFuture(abort()); - } catch (TransactionStatusException e) { + } catch (TransactionBufferException.TransactionStatusException e) { return FutureUtil.failedFuture(e); } } - synchronized TxnBuffer abort() throws TransactionStatusException { + synchronized TxnBuffer abort() throws TransactionBufferException.TransactionStatusException { if (TxnStatus.OPEN != status) { - throw new TransactionStatusException(txnid, TxnStatus.OPEN, status); + throw new TransactionBufferException.TransactionStatusException(txnid, TxnStatus.OPEN, status); } this.status = TxnStatus.ABORTED; return this; } synchronized TxnBuffer commitAt(long committedAtLedgerId, long committedAtEntryId) - throws TransactionStatusException { + throws TransactionBufferException.TransactionStatusException { if (TxnStatus.OPEN != status) { - throw new TransactionStatusException(txnid, TxnStatus.OPEN, status); + throw new TransactionBufferException.TransactionStatusException(txnid, TxnStatus.OPEN, status); } this.committedAtLedgerId = committedAtLedgerId; @@ -170,11 +167,13 @@ public void close() { } } - public void appendEntry(long sequenceId, ByteBuf entry) throws TransactionSealedException { + public void appendEntry(long sequenceId, ByteBuf entry) throws + TransactionBufferException.TransactionSealedException { synchronized (this) { if (TxnStatus.OPEN != status) { // the transaction is not open anymore, reject the append operations - throw new TransactionSealedException("Transaction `" + txnid + "` is already sealed"); + 
throw new TransactionBufferException + .TransactionSealedException("Transaction `" + txnid + "` is already sealed"); } } @@ -183,17 +182,19 @@ public void appendEntry(long sequenceId, ByteBuf entry) throws TransactionSealed } } - public TransactionBufferReader newReader(long sequenceId) throws TransactionNotSealedException { + public TransactionBufferReader newReader(long sequenceId) throws + TransactionBufferException.TransactionNotSealedException { synchronized (this) { if (TxnStatus.COMMITTED != status) { // the transaction is not committed yet, hence the buffer is not sealed - throw new TransactionNotSealedException("Transaction `" + txnid + "` is not sealed yet"); + throw new TransactionBufferException + .TransactionNotSealedException("Transaction `" + txnid + "` is not sealed yet"); } } final SortedMap entriesToRead = new TreeMap<>(); synchronized (entries) { - SortedMap subEntries = entries.tailMap(Long.valueOf(sequenceId)); + SortedMap subEntries = entries.tailMap(sequenceId); subEntries.values().forEach(value -> value.retain()); entriesToRead.putAll(subEntries); } @@ -220,17 +221,17 @@ public CompletableFuture getTransactionMeta(TxnID txnID) { CompletableFuture getFuture = new CompletableFuture<>(); try { getFuture.complete(getTxnBufferOrThrowNotFoundException(txnID)); - } catch (TransactionNotFoundException e) { + } catch (TransactionBufferException.TransactionNotFoundException e) { getFuture.completeExceptionally(e); } return getFuture; } private TxnBuffer getTxnBufferOrThrowNotFoundException(TxnID txnID) - throws TransactionNotFoundException { + throws TransactionBufferException.TransactionNotFoundException { TxnBuffer buffer = buffers.get(txnID); if (null == buffer) { - throw new TransactionNotFoundException( + throw new TransactionBufferException.TransactionNotFoundException( "Transaction `" + txnID + "` doesn't exist in the transaction buffer"); } return buffer; @@ -262,7 +263,7 @@ public CompletableFuture appendBufferToTxn(TxnID txnId, try { 
txnBuffer.appendEntry(sequenceId, buffer); appendFuture.complete(null); - } catch (TransactionSealedException e) { + } catch (TransactionBufferException.TransactionSealedException e) { appendFuture.completeExceptionally(e); } return appendFuture; @@ -276,7 +277,8 @@ public CompletableFuture openTransactionBufferReader( TxnBuffer txnBuffer = getTxnBufferOrThrowNotFoundException(txnID); TransactionBufferReader reader = txnBuffer.newReader(startSequenceId); openFuture.complete(reader); - } catch (TransactionNotFoundException | TransactionNotSealedException e) { + } catch (TransactionBufferException.TransactionNotFoundException + | TransactionBufferException.TransactionNotSealedException e) { openFuture.completeExceptionally(e); } return openFuture; @@ -295,7 +297,8 @@ public CompletableFuture commitTxn(TxnID txnID, long lowWaterMark) { addTxnToTxnIdex(txnID, committedAtLedgerId); } commitFuture.complete(null); - } catch (TransactionNotFoundException | TransactionStatusException e) { + } catch (TransactionBufferException.TransactionNotFoundException + | TransactionBufferException.TransactionStatusException e) { commitFuture.completeExceptionally(e); } return commitFuture; @@ -318,7 +321,8 @@ public CompletableFuture abortTxn(TxnID txnID, long lowWaterMark) { txnBuffer.abort(); buffers.remove(txnID, txnBuffer); abortFuture.complete(null); - } catch (TransactionNotFoundException | TransactionStatusException e) { + } catch (TransactionBufferException.TransactionNotFoundException + | TransactionBufferException.TransactionStatusException e) { abortFuture.completeExceptionally(e); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/InMemTransactionBufferReader.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/InMemTransactionBufferReader.java index 91b095ede5b94..d81ce02aebe7b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/InMemTransactionBufferReader.java +++ 
b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/InMemTransactionBufferReader.java @@ -27,7 +27,7 @@ import org.apache.bookkeeper.mledger.impl.EntryImpl; import org.apache.pulsar.broker.transaction.buffer.TransactionBufferReader; import org.apache.pulsar.broker.transaction.buffer.TransactionEntry; -import org.apache.pulsar.broker.transaction.buffer.exceptions.EndOfTransactionException; +import org.apache.pulsar.broker.transaction.exception.buffer.TransactionBufferException; import org.apache.pulsar.client.api.transaction.TxnID; /** @@ -78,7 +78,7 @@ public synchronized CompletableFuture> readNext(int numEn } if (txnEntries.isEmpty()) { - readFuture.completeExceptionally(new EndOfTransactionException( + readFuture.completeExceptionally(new TransactionBufferException.EndOfTransactionException( "No more entries found in transaction `" + txnId + "`" )); } else { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBuffer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBuffer.java index 79f7f35732754..3dc9ff88c2028 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBuffer.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBuffer.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.transaction.buffer.impl; +import com.google.common.annotations.VisibleForTesting; import io.netty.buffer.ByteBuf; import io.netty.util.Timeout; import io.netty.util.Timer; @@ -25,6 +26,8 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import lombok.SneakyThrows; @@ -37,6 +40,7 @@ import 
org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.collections4.map.LinkedMap; +import org.apache.pulsar.broker.service.BrokerServiceException; import org.apache.pulsar.broker.service.BrokerServiceException.PersistenceException; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.systopic.SystemTopicClient; @@ -54,6 +58,7 @@ import org.apache.pulsar.common.policies.data.TransactionInBufferStats; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.Markers; +import org.apache.pulsar.common.util.Codec; import org.jctools.queues.MessagePassingQueue; import org.jctools.queues.SpscArrayQueue; @@ -93,10 +98,16 @@ public class TopicTransactionBuffer extends TopicTransactionBufferState implemen private final CompletableFuture transactionBufferFuture = new CompletableFuture<>(); + /** + * The map is used to store the lowWaterMarks which key is TC ID and value is lowWaterMark of the TC. 
+ */ + private final ConcurrentHashMap lowWaterMarks = new ConcurrentHashMap<>(); + + private final Semaphore handleLowWaterMark = new Semaphore(1); + public TopicTransactionBuffer(PersistentTopic topic) { super(State.None); this.topic = topic; - this.changeToInitializingState(); this.takeSnapshotWriter = this.topic.getBrokerService().getPulsar() .getTransactionBufferSnapshotService().createWriter(TopicName.get(topic.getName())); this.timer = topic.getBrokerService().getPulsar().getTransactionTimer(); @@ -105,25 +116,48 @@ public TopicTransactionBuffer(PersistentTopic topic) { this.takeSnapshotIntervalTime = topic.getBrokerService().getPulsar() .getConfiguration().getTransactionBufferSnapshotMinTimeInMillis(); this.maxReadPosition = (PositionImpl) topic.getManagedLedger().getLastConfirmedEntry(); - this.topic.getBrokerService().getPulsar().getTransactionReplayExecutor() + this.recover(); + } + + private void recover() { + this.topic.getBrokerService().getPulsar().getTransactionExecutorProvider().getExecutor(this) .execute(new TopicTransactionBufferRecover(new TopicTransactionBufferRecoverCallBack() { @Override public void recoverComplete() { - if (!changeToReadyState()) { - log.error("[{}]Transaction buffer recover fail", topic.getName()); - } else { - timer.newTimeout(TopicTransactionBuffer.this, - takeSnapshotIntervalTime, TimeUnit.MILLISECONDS); - transactionBufferFuture.complete(null); + synchronized (TopicTransactionBuffer.this) { + // sync maxReadPosition change to LAC when TopicTransaction buffer have not recover + // completely the normal message have been sent to broker and state is + // not Ready can't sync maxReadPosition when no ongoing transactions + if (ongoingTxns.isEmpty()) { + maxReadPosition = (PositionImpl) topic.getManagedLedger().getLastConfirmedEntry(); + } + if (!changeToReadyState()) { + log.error("[{}]Transaction buffer recover fail, current state: {}", + topic.getName(), getState()); + transactionBufferFuture.completeExceptionally + (new 
BrokerServiceException.ServiceUnitNotReadyException( + "Transaction buffer recover failed to change the status to Ready," + + "current state is: " + getState())); + } else { + timer.newTimeout(TopicTransactionBuffer.this, + takeSnapshotIntervalTime, TimeUnit.MILLISECONDS); + transactionBufferFuture.complete(null); + } } } @Override public void noNeedToRecover() { - if (!changeToNoSnapshotState()) { - log.error("[{}]Transaction buffer recover fail", topic.getName()); - } else { - transactionBufferFuture.complete(null); + synchronized (TopicTransactionBuffer.this) { + // sync maxReadPosition change to LAC when TopicTransaction buffer have not recover + // completely the normal message have been sent to broker and state is + // not NoSnapshot can't sync maxReadPosition + maxReadPosition = (PositionImpl) topic.getManagedLedger().getLastConfirmedEntry(); + if (!changeToNoSnapshotState()) { + log.error("[{}]Transaction buffer recover fail", topic.getName()); + } else { + transactionBufferFuture.complete(null); + } } } @@ -161,15 +195,29 @@ public void handleTxnEntry(Entry entry) { } @Override - public void recoverExceptionally(Exception e) { - transactionBufferFuture.completeExceptionally(e); + public void recoverExceptionally(Throwable e) { + + log.warn("Closing topic {} due to read transaction buffer snapshot while recovering the " + + "transaction buffer throw exception", topic.getName(), e); + // when create reader or writer fail throw PulsarClientException, + // should close this topic and then reinit this topic + if (e instanceof PulsarClientException) { + // if transaction buffer recover fail throw PulsarClientException, + // we need to change the PulsarClientException to ServiceUnitNotReadyException, + // the tc do op will retry + transactionBufferFuture.completeExceptionally + (new BrokerServiceException.ServiceUnitNotReadyException(e.getMessage(), e)); + } else { + transactionBufferFuture.completeExceptionally(e); + } + topic.close(true); } - }, this.topic, 
this)); + }, this.topic, this, takeSnapshotWriter)); } @Override public CompletableFuture getTransactionMeta(TxnID txnID) { - return null; + return CompletableFuture.completedFuture(null); } @Override @@ -195,8 +243,8 @@ public CompletableFuture checkIfTBRecoverCompletely(boolean isTxnEnabled) completableFuture.complete(null); } }).exceptionally(exception -> { - log.error("Topic {}: TransactionBuffer recover failed", this.topic.getName(), exception); - completableFuture.completeExceptionally(exception); + log.error("Topic {}: TransactionBuffer recover failed", this.topic.getName(), exception.getCause()); + completableFuture.completeExceptionally(exception.getCause()); return null; }); return completableFuture; @@ -207,6 +255,13 @@ public CompletableFuture checkIfTBRecoverCompletely(boolean isTxnEnabled) @Override public CompletableFuture appendBufferToTxn(TxnID txnId, long sequenceId, ByteBuf buffer) { CompletableFuture completableFuture = new CompletableFuture<>(); + Long lowWaterMark = lowWaterMarks.get(txnId.getMostSigBits()); + if (lowWaterMark != null && lowWaterMark >= txnId.getLeastSigBits()) { + completableFuture.completeExceptionally(new BrokerServiceException + .NotAllowedException("Transaction [" + txnId + "] has been ended. 
" + + "Please use a new transaction to send message.")); + return completableFuture; + } topic.getManagedLedger().asyncAddEntry(buffer, new AsyncCallbacks.AddEntryCallback() { @Override public void addComplete(Position position, ByteBuf entryData, Object ctx) { @@ -219,7 +274,7 @@ public void addComplete(Position position, ByteBuf entryData, Object ctx) { @Override public void addFailed(ManagedLedgerException exception, Object ctx) { log.error("Failed to append buffer to txn {}", txnId, exception); - completableFuture.completeExceptionally(new PersistenceException(exception)); + completableFuture.completeExceptionally(exception); } }, null); return completableFuture; @@ -266,6 +321,7 @@ public void addComplete(Position position, ByteBuf entryData, Object ctx) { @Override public void addFailed(ManagedLedgerException exception, Object ctx) { log.error("Failed to commit for txn {}", txnID, exception); + checkAppendMarkerException(exception); completableFuture.completeExceptionally(new PersistenceException(exception)); } }, null); @@ -273,8 +329,8 @@ public void addFailed(ManagedLedgerException exception, Object ctx) { commitMarker.release(); } }).exceptionally(exception -> { - log.error("Transaction {} commit on topic {}.", txnID.toString(), topic.getName(), exception); - completableFuture.completeExceptionally(exception); + log.error("Transaction {} commit on topic {}.", txnID.toString(), topic.getName(), exception.getCause()); + completableFuture.completeExceptionally(exception.getCause()); return null; }); return completableFuture; @@ -301,17 +357,18 @@ public void addComplete(Position position, ByteBuf entryData, Object ctx) { synchronized (TopicTransactionBuffer.this) { aborts.put(txnID, (PositionImpl) position); updateMaxReadPosition(txnID); - handleLowWaterMark(txnID, lowWaterMark); changeMaxReadPositionAndAddAbortTimes.getAndIncrement(); clearAbortedTransactions(); takeSnapshotByChangeTimes(); } completableFuture.complete(null); + handleLowWaterMark(txnID, 
lowWaterMark); } @Override public void addFailed(ManagedLedgerException exception, Object ctx) { log.error("Failed to abort for txn {}", txnID, exception); + checkAppendMarkerException(exception); completableFuture.completeExceptionally(new PersistenceException(exception)); } }, null); @@ -319,38 +376,50 @@ public void addFailed(ManagedLedgerException exception, Object ctx) { abortMarker.release(); } }).exceptionally(exception -> { - log.error("Transaction {} abort on topic {}.", txnID.toString(), topic.getName()); - completableFuture.completeExceptionally(exception); + log.error("Transaction {} abort on topic {}.", txnID.toString(), topic.getName(), exception.getCause()); + completableFuture.completeExceptionally(exception.getCause()); return null; }); return completableFuture; } - private void handleLowWaterMark(TxnID txnID, long lowWaterMark) { - if (!ongoingTxns.isEmpty()) { - TxnID firstTxn = ongoingTxns.firstKey(); - if (firstTxn.getMostSigBits() == txnID.getMostSigBits() && lowWaterMark >= firstTxn.getLeastSigBits()) { - ByteBuf abortMarker = Markers.newTxnAbortMarker(-1L, - firstTxn.getMostSigBits(), firstTxn.getLeastSigBits()); - try { - topic.getManagedLedger().asyncAddEntry(abortMarker, new AsyncCallbacks.AddEntryCallback() { - @Override - public void addComplete(Position position, ByteBuf entryData, Object ctx) { - synchronized (TopicTransactionBuffer.this) { - aborts.put(firstTxn, (PositionImpl) position); - updateMaxReadPosition(firstTxn); - } - } + private void checkAppendMarkerException(ManagedLedgerException exception) { + if (exception instanceof ManagedLedgerException.ManagedLedgerAlreadyClosedException) { + topic.getManagedLedger().readyToCreateNewLedger(); + } + } - @Override - public void addFailed(ManagedLedgerException exception, Object ctx) { - log.error("Failed to abort low water mark for txn {}", txnID, exception); - } - }, null); - } finally { - abortMarker.release(); + private void handleLowWaterMark(TxnID txnID, long lowWaterMark) { + 
lowWaterMarks.compute(txnID.getMostSigBits(), (tcId, oldLowWaterMark) -> { + if (oldLowWaterMark == null || oldLowWaterMark < lowWaterMark) { + return lowWaterMark; + } else { + return oldLowWaterMark; + } + }); + if (handleLowWaterMark.tryAcquire()) { + if (!ongoingTxns.isEmpty()) { + TxnID firstTxn = ongoingTxns.firstKey(); + long tCId = firstTxn.getMostSigBits(); + Long lowWaterMarkOfFirstTxnId = lowWaterMarks.get(tCId); + if (lowWaterMarkOfFirstTxnId != null && firstTxn.getLeastSigBits() <= lowWaterMarkOfFirstTxnId) { + abortTxn(firstTxn, lowWaterMarkOfFirstTxnId) + .thenRun(() -> { + log.warn("Successes to abort low water mark for txn [{}], topic [{}]," + + " lowWaterMark [{}]", firstTxn, topic.getName(), lowWaterMarkOfFirstTxnId); + handleLowWaterMark.release(); + }) + .exceptionally(ex -> { + log.warn("Failed to abort low water mark for txn {}, topic [{}], " + + "lowWaterMark [{}], ", firstTxn, topic.getName(), lowWaterMarkOfFirstTxnId, + ex); + handleLowWaterMark.release(); + return null; + }); + return; } } + handleLowWaterMark.release(); } } @@ -455,9 +524,13 @@ public void syncMaxReadPositionForNormalPublish(PositionImpl position) { // when ongoing transaction is empty, proved that lastAddConfirm is can read max position, because callback // thread is the same tread, in this time the lastAddConfirm don't content transaction message. synchronized (TopicTransactionBuffer.this) { - if (ongoingTxns.isEmpty()) { + if (checkIfNoSnapshot()) { maxReadPosition = position; - changeMaxReadPositionAndAddAbortTimes.incrementAndGet(); + } else if (checkIfReady()) { + if (ongoingTxns.isEmpty()) { + maxReadPosition = position; + changeMaxReadPositionAndAddAbortTimes.incrementAndGet(); + } } } } @@ -499,7 +572,8 @@ public void run(Timeout timeout) { // we store the maxReadPosition from snapshot then open the non-durable cursor by this topic's manageLedger. // the non-durable cursor will read to lastConfirmedEntry. 
- static class TopicTransactionBufferRecover implements Runnable { + @VisibleForTesting + public static class TopicTransactionBufferRecover implements Runnable { private final PersistentTopic topic; @@ -515,106 +589,124 @@ static class TopicTransactionBufferRecover implements Runnable { private final TopicTransactionBuffer topicTransactionBuffer; + private final CompletableFuture> takeSnapshotWriter; + private TopicTransactionBufferRecover(TopicTransactionBufferRecoverCallBack callBack, PersistentTopic topic, - TopicTransactionBuffer transactionBuffer) { + TopicTransactionBuffer transactionBuffer, CompletableFuture< + SystemTopicClient.Writer> takeSnapshotWriter) { this.topic = topic; this.callBack = callBack; this.entryQueue = new SpscArrayQueue<>(2000); this.topicTransactionBuffer = transactionBuffer; + this.takeSnapshotWriter = takeSnapshotWriter; } @SneakyThrows @Override public void run() { - this.topicTransactionBuffer.changeToInitializingState(); - topic.getBrokerService().getPulsar().getTransactionBufferSnapshotService() - .createReader(TopicName.get(topic.getName())).thenAcceptAsync(reader -> { - try { - boolean hasSnapshot = false; - while (reader.hasMoreEvents()) { - hasSnapshot = true; - Message message = reader.readNext(); - TransactionBufferSnapshot transactionBufferSnapshot = message.getValue(); - if (topic.getName().equals(transactionBufferSnapshot.getTopicName())) { - callBack.handleSnapshot(transactionBufferSnapshot); - this.startReadCursorPosition = PositionImpl.get( - transactionBufferSnapshot.getMaxReadPositionLedgerId(), - transactionBufferSnapshot.getMaxReadPositionEntryId()); - } - } - if (!hasSnapshot) { - callBack.noNeedToRecover(); - return; - } - } catch (PulsarClientException pulsarClientException) { - log.error("[{}]Transaction buffer recover fail when read " - + "transactionBufferSnapshot!", topic.getName(), pulsarClientException); - callBack.recoverExceptionally(pulsarClientException); - reader.closeAsync().exceptionally(e -> { - 
log.error("[{}]Transaction buffer reader close error!", topic.getName(), e); - return null; - }); + this.takeSnapshotWriter.thenRunAsync(() -> { + if (!this.topicTransactionBuffer.changeToInitializingState()) { + log.warn("TransactionBuffer {} of topic {} can not change state to Initializing", + this, topic.getName()); return; } - reader.closeAsync().exceptionally(e -> { - log.error("[{}]Transaction buffer reader close error!", topic.getName(), e); - return null; - }); - - ManagedCursor managedCursor; - try { - managedCursor = topic.getManagedLedger() - .newNonDurableCursor(this.startReadCursorPosition, SUBSCRIPTION_NAME); - } catch (ManagedLedgerException e) { - callBack.recoverExceptionally(e); - log.error("[{}]Transaction buffer recover fail when open cursor!", topic.getName(), e); - return; - } - PositionImpl lastConfirmedEntry = (PositionImpl) topic.getManagedLedger().getLastConfirmedEntry(); - PositionImpl currentLoadPosition = (PositionImpl) this.startReadCursorPosition; - FillEntryQueueCallback fillEntryQueueCallback = new FillEntryQueueCallback(entryQueue, managedCursor, - TopicTransactionBufferRecover.this); - if (lastConfirmedEntry.getEntryId() != -1) { - while (lastConfirmedEntry.compareTo(currentLoadPosition) > 0) { - fillEntryQueueCallback.fillQueue(); - Entry entry = entryQueue.poll(); - if (entry != null) { + topic.getBrokerService().getPulsar().getTransactionBufferSnapshotService() + .createReader(TopicName.get(topic.getName())).thenAcceptAsync(reader -> { try { - currentLoadPosition = PositionImpl.get(entry.getLedgerId(), entry.getEntryId()); - callBack.handleTxnEntry(entry); - } finally { - entry.release(); + boolean hasSnapshot = false; + while (reader.hasMoreEvents()) { + Message message = reader.readNext(); + if (topic.getName().equals(message.getKey())) { + TransactionBufferSnapshot transactionBufferSnapshot = message.getValue(); + if (transactionBufferSnapshot != null) { + hasSnapshot = true; + 
callBack.handleSnapshot(transactionBufferSnapshot); + this.startReadCursorPosition = PositionImpl.get( + transactionBufferSnapshot.getMaxReadPositionLedgerId(), + transactionBufferSnapshot.getMaxReadPositionEntryId()); + } + } + } + if (!hasSnapshot) { + closeReader(reader); + callBack.noNeedToRecover(); + return; + } + } catch (Exception ex) { + log.error("[{}] Transaction buffer recover fail when read " + + "transactionBufferSnapshot!", topic.getName(), ex); + callBack.recoverExceptionally(ex); + closeReader(reader); + return; } - } else { + closeReader(reader); + + ManagedCursor managedCursor; try { - Thread.sleep(1); - } catch (InterruptedException e) { - //no-op + managedCursor = topic.getManagedLedger() + .newNonDurableCursor(this.startReadCursorPosition, SUBSCRIPTION_NAME); + } catch (ManagedLedgerException e) { + callBack.recoverExceptionally(e); + log.error("[{}]Transaction buffer recover fail when open cursor!", topic.getName(), e); + return; + } + PositionImpl lastConfirmedEntry = + (PositionImpl) topic.getManagedLedger().getLastConfirmedEntry(); + PositionImpl currentLoadPosition = (PositionImpl) this.startReadCursorPosition; + FillEntryQueueCallback fillEntryQueueCallback = new FillEntryQueueCallback(entryQueue, + managedCursor, TopicTransactionBufferRecover.this); + if (lastConfirmedEntry.getEntryId() != -1) { + while (lastConfirmedEntry.compareTo(currentLoadPosition) > 0 + && fillEntryQueueCallback.fillQueue()) { + Entry entry = entryQueue.poll(); + if (entry != null) { + try { + currentLoadPosition = PositionImpl.get(entry.getLedgerId(), + entry.getEntryId()); + callBack.handleTxnEntry(entry); + } finally { + entry.release(); + } + } else { + try { + Thread.sleep(1); + } catch (InterruptedException e) { + //no-op + } + } + } } - } - } - } - closeCursor(managedCursor); - callBack.recoverComplete(); - }).exceptionally(e -> { - callBack.recoverExceptionally(new Exception(e)); - log.error("[{}]Transaction buffer new snapshot reader fail!", 
topic.getName(), e); + closeCursor(SUBSCRIPTION_NAME); + callBack.recoverComplete(); + }, topic.getBrokerService().getPulsar().getTransactionExecutorProvider() + .getExecutor(this)).exceptionally(e -> { + callBack.recoverExceptionally(e.getCause()); + log.error("[{}]Transaction buffer new snapshot reader fail!", topic.getName(), e); + return null; + }); + }, topic.getBrokerService().getPulsar().getTransactionExecutorProvider() + .getExecutor(this)).exceptionally(e -> { + callBack.recoverExceptionally(e.getCause()); + log.error("[{}]Transaction buffer create snapshot writer fail!", + topic.getName(), e); return null; }); } - private void closeCursor(ManagedCursor cursor) { - cursor.asyncClose(new AsyncCallbacks.CloseCallback() { + private void closeCursor(String subscriptionName) { + topic.getManagedLedger().asyncDeleteCursor(Codec.encode(subscriptionName), + new AsyncCallbacks.DeleteCursorCallback() { @Override - public void closeComplete(Object ctx) { + public void deleteCursorComplete(Object ctx) { log.info("[{}]Transaction buffer snapshot recover cursor close complete.", topic.getName()); } @Override - public void closeFailed(ManagedLedgerException exception, Object ctx) { + public void deleteCursorFailed(ManagedLedgerException exception, Object ctx) { log.error("[{}]Transaction buffer snapshot recover cursor close fail.", topic.getName()); } + }, null); } @@ -622,6 +714,13 @@ private void callBackException(ManagedLedgerException e) { log.error("Transaction buffer recover fail when recover transaction entry!", e); this.exceptionNumber.getAndIncrement(); } + + private void closeReader(SystemTopicClient.Reader reader) { + reader.closeAsync().exceptionally(e -> { + log.error("[{}]Transaction buffer reader close error!", topic.getName(), e); + return null; + }); + } } static class FillEntryQueueCallback implements AsyncCallbacks.ReadEntriesCallback { @@ -634,19 +733,29 @@ static class FillEntryQueueCallback implements AsyncCallbacks.ReadEntriesCallbac private final 
TopicTransactionBufferRecover recover; + private volatile boolean isReadable = true; + + private static final int NUMBER_OF_PER_READ_ENTRY = 100; + private FillEntryQueueCallback(SpscArrayQueue entryQueue, ManagedCursor cursor, TopicTransactionBufferRecover recover) { this.entryQueue = entryQueue; this.cursor = cursor; this.recover = recover; } - void fillQueue() { - if (entryQueue.size() < entryQueue.capacity() && outstandingReadsRequests.get() == 0) { + boolean fillQueue() { + if (entryQueue.size() + NUMBER_OF_PER_READ_ENTRY < entryQueue.capacity() + && outstandingReadsRequests.get() == 0) { if (cursor.hasMoreEntries()) { outstandingReadsRequests.incrementAndGet(); - cursor.asyncReadEntries(100, this, System.nanoTime(), PositionImpl.latest); + cursor.asyncReadEntries(NUMBER_OF_PER_READ_ENTRY, this, System.nanoTime(), PositionImpl.latest); + } else { + if (entryQueue.size() == 0) { + isReadable = false; + } } } + return isReadable; } @Override @@ -666,8 +775,15 @@ public Entry get() { @Override public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + if (recover.topic.getManagedLedger().getConfig().isAutoSkipNonRecoverableData() + && exception instanceof ManagedLedgerException.NonRecoverableLedgerException + || exception instanceof ManagedLedgerException.ManagedLedgerFencedException + || exception instanceof ManagedLedgerException.CursorAlreadyClosedException) { + isReadable = false; + } else { + outstandingReadsRequests.decrementAndGet(); + } recover.callBackException(exception); - outstandingReadsRequests.decrementAndGet(); } } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBufferRecoverCallBack.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBufferRecoverCallBack.java index 1640459027255..87b8e930a2792 100644 --- 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBufferRecoverCallBack.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TopicTransactionBufferRecoverCallBack.java @@ -51,5 +51,5 @@ public interface TopicTransactionBufferRecoverCallBack { /** * Topic transaction buffer recover exceptionally. */ - void recoverExceptionally(Exception e); + void recoverExceptionally(Throwable e); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferClientImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferClientImpl.java index e774f1282a8a7..c531f9f1871d6 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferClientImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferClientImpl.java @@ -21,7 +21,8 @@ import io.netty.util.HashedWheelTimer; import java.util.concurrent.CompletableFuture; import lombok.extern.slf4j.Slf4j; -import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.broker.PulsarServerException; +import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.client.api.transaction.TransactionBufferClient; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.client.impl.transaction.TransactionBufferHandler; @@ -39,8 +40,10 @@ private TransactionBufferClientImpl(TransactionBufferHandler tbHandler) { this.tbHandler = tbHandler; } - public static TransactionBufferClient create(PulsarClient pulsarClient, HashedWheelTimer timer) { - TransactionBufferHandler handler = new TransactionBufferHandlerImpl(pulsarClient, timer); + public static TransactionBufferClient create(PulsarService pulsarService, HashedWheelTimer timer, + int maxConcurrentRequests, long operationTimeoutInMills) throws PulsarServerException { + 
TransactionBufferHandler handler = new TransactionBufferHandlerImpl(pulsarService, timer, + maxConcurrentRequests, operationTimeoutInMills); return new TransactionBufferClientImpl(handler); } @@ -74,4 +77,14 @@ public CompletableFuture abortTxnOnSubscription(String topic, String subs public void close() { tbHandler.close(); } + + @Override + public int getAvailableRequestCredits() { + return tbHandler.getAvailableRequestCredits(); + } + + @Override + public int getPendingRequestsCount() { + return tbHandler.getPendingRequestsCount(); + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferHandlerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferHandlerImpl.java index 54f77a331647c..3f9a083787bb5 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferHandlerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/impl/TransactionBufferHandlerImpl.java @@ -18,27 +18,25 @@ */ package org.apache.pulsar.broker.transaction.buffer.impl; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; import io.netty.buffer.ByteBuf; import io.netty.util.HashedWheelTimer; import io.netty.util.Recycler; import io.netty.util.ReferenceCountUtil; -import io.netty.util.Timeout; -import io.netty.util.TimerTask; -import java.util.Map; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.concurrent.atomic.AtomicLong; import lombok.extern.slf4j.Slf4j; -import 
org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.broker.PulsarServerException; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.namespace.NamespaceEphemeralData; +import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.transaction.TransactionBufferClientException; -import org.apache.pulsar.client.api.transaction.TransactionBufferClientException.ReachMaxPendingOpsException; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.client.impl.ClientCnx; import org.apache.pulsar.client.impl.PulsarClientImpl; @@ -46,66 +44,59 @@ import org.apache.pulsar.common.api.proto.CommandEndTxnOnPartitionResponse; import org.apache.pulsar.common.api.proto.CommandEndTxnOnSubscriptionResponse; import org.apache.pulsar.common.api.proto.TxnAction; +import org.apache.pulsar.common.naming.NamespaceBundle; +import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.protocol.Commands; +import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.collections.GrowableArrayBlockingQueue; @Slf4j -public class TransactionBufferHandlerImpl implements TransactionBufferHandler, TimerTask { +public class TransactionBufferHandlerImpl implements TransactionBufferHandler { - private final ConcurrentSkipListMap pendingRequests; + private final ConcurrentSkipListMap outstandingRequests; + private final GrowableArrayBlockingQueue pendingRequests; private final AtomicLong requestIdGenerator = new AtomicLong(); private final long operationTimeoutInMills; - private Timeout requestTimeout; private final HashedWheelTimer timer; - private final Semaphore semaphore; - private final boolean blockIfReachMaxPendingOps; - private final PulsarClient pulsarClient; + private final PulsarService pulsarService; + private final PulsarClientImpl pulsarClient; - private final LoadingCache> cache = 
CacheBuilder.newBuilder() - .maximumSize(100000) - .expireAfterAccess(30, TimeUnit.MINUTES) - .build(new CacheLoader>() { - @Override - public CompletableFuture load(String topic) { - CompletableFuture siFuture = getClientCnx(topic); - siFuture.whenComplete((si, cause) -> { - if (null != cause) { - cache.invalidate(topic); - } - }); - return siFuture; - } - }); + private static final AtomicIntegerFieldUpdater REQUEST_CREDITS_UPDATER = + AtomicIntegerFieldUpdater.newUpdater(TransactionBufferHandlerImpl.class, "requestCredits"); + private volatile int requestCredits; - public TransactionBufferHandlerImpl(PulsarClient pulsarClient, - HashedWheelTimer timer) { - this.pulsarClient = pulsarClient; - this.pendingRequests = new ConcurrentSkipListMap<>(); - this.operationTimeoutInMills = 3000L; - this.semaphore = new Semaphore(10000); - this.blockIfReachMaxPendingOps = true; + public TransactionBufferHandlerImpl(PulsarService pulsarService, HashedWheelTimer timer, + int maxConcurrentRequests, long operationTimeoutInMills) throws PulsarServerException { + this.pulsarService = pulsarService; + this.pulsarClient = (PulsarClientImpl) pulsarService.getClient(); + this.outstandingRequests = new ConcurrentSkipListMap<>(); + this.pendingRequests = new GrowableArrayBlockingQueue<>(); + this.operationTimeoutInMills = operationTimeoutInMills; this.timer = timer; - this.requestTimeout = timer.newTimeout(this, operationTimeoutInMills, TimeUnit.MILLISECONDS); + this.requestCredits = Math.max(100, maxConcurrentRequests); } @Override - public synchronized CompletableFuture endTxnOnTopic(String topic, long txnIdMostBits, long txnIdLeastBits, + public CompletableFuture endTxnOnTopic(String topic, long txnIdMostBits, long txnIdLeastBits, TxnAction action, long lowWaterMark) { if (log.isDebugEnabled()) { log.debug("[{}] endTxnOnTopic txnId: [{}], txnAction: [{}]", topic, new TxnID(txnIdMostBits, txnIdLeastBits), action.getValue()); } CompletableFuture cb = new CompletableFuture<>(); - if 
(!canSendRequest(cb)) { - return cb; - } long requestId = requestIdGenerator.getAndIncrement(); ByteBuf cmd = Commands.newEndTxnOnPartition(requestId, txnIdLeastBits, txnIdMostBits, topic, action, lowWaterMark); - return endTxn(requestId, topic, cmd, cb); + + OpRequestSend op = OpRequestSend.create(requestId, topic, cmd, cb, getClientCnx(topic)); + if (checkRequestCredits(op)) { + endTxn(op); + } + return cb; } @Override - public synchronized CompletableFuture endTxnOnSubscription(String topic, String subscription, + public CompletableFuture endTxnOnSubscription(String topic, String subscription, long txnIdMostBits, long txnIdLeastBits, TxnAction action, long lowWaterMark) { if (log.isDebugEnabled()) { @@ -113,53 +104,68 @@ public synchronized CompletableFuture endTxnOnSubscription(String topic, topic, new TxnID(txnIdMostBits, txnIdLeastBits), action.getValue()); } CompletableFuture cb = new CompletableFuture<>(); - if (!canSendRequest(cb)) { - return cb; - } long requestId = requestIdGenerator.getAndIncrement(); ByteBuf cmd = Commands.newEndTxnOnSubscription(requestId, txnIdLeastBits, txnIdMostBits, topic, subscription, action, lowWaterMark); - return endTxn(requestId, topic, cmd, cb); + OpRequestSend op = OpRequestSend.create(requestId, topic, cmd, cb, getClientCnx(topic)); + if (checkRequestCredits(op)) { + endTxn(op); + } + return cb; } - private CompletableFuture endTxn(long requestId, String topic, ByteBuf cmd, CompletableFuture cb) { - OpRequestSend op = OpRequestSend.create(requestId, topic, cmd, cb); - try { - cache.get(topic).whenComplete((clientCnx, throwable) -> { - if (throwable == null) { - if (clientCnx.ctx().channel().isActive()) { - clientCnx.registerTransactionBufferHandler(TransactionBufferHandlerImpl.this); - synchronized (TransactionBufferHandlerImpl.this) { - pendingRequests.put(requestId, op); - cmd.retain(); + private boolean checkRequestCredits(OpRequestSend op) { + int currentPermits = REQUEST_CREDITS_UPDATER.get(this); + if 
(currentPermits > 0 && pendingRequests.peek() == null) { + if (REQUEST_CREDITS_UPDATER.compareAndSet(this, currentPermits, currentPermits - 1)) { + return true; + } else { + return checkRequestCredits(op); + } + } else { + pendingRequests.add(op); + return false; + } + } + + public void endTxn(OpRequestSend op) { + op.cnx.whenComplete((clientCnx, ex) -> { + if (ex == null) { + if (clientCnx.ctx().channel().isActive()) { + clientCnx.registerTransactionBufferHandler(TransactionBufferHandlerImpl.this); + outstandingRequests.put(op.requestId, op); + timer.newTimeout(timeout -> { + OpRequestSend peek = outstandingRequests.remove(op.requestId); + if (peek != null && !peek.cb.isDone() && !peek.cb.isCompletedExceptionally()) { + peek.cb.completeExceptionally(new TransactionBufferClientException + .RequestTimeoutException()); + onResponse(peek); } - clientCnx.ctx().writeAndFlush(cmd, clientCnx.ctx().voidPromise()); - } else { - cache.invalidate(topic); - cb.completeExceptionally( - new PulsarClientException.LookupException(topic + " endTxn channel is not active")); - op.recycle(); - } + }, operationTimeoutInMills, TimeUnit.MILLISECONDS); + op.cmd.retain(); + clientCnx.ctx().writeAndFlush(op.cmd, clientCnx.ctx().voidPromise()); } else { - log.error("endTxn error topic: [{}]", topic, throwable); - cache.invalidate(topic); - cb.completeExceptionally( - new PulsarClientException.LookupException(throwable.getMessage())); - op.recycle(); + op.cb.completeExceptionally( + new PulsarClientException.LookupException(op.topic + " endTxn channel is not active")); + onResponse(op); } - }); - } catch (ExecutionException e) { - log.error("endTxn channel is not active exception", e); - cache.invalidate(topic); - cb.completeExceptionally(new PulsarClientException.LookupException(e.getCause().getMessage())); - op.recycle(); - } - return cb; + } else { + Throwable cause = FutureUtil.unwrapCompletionException(ex); + log.error("endTxn error topic: [{}]", op.topic, cause); + if (cause instanceof 
PulsarClientException.BrokerMetadataException) { + op.cb.complete(null); + } else { + op.cb.completeExceptionally( + new PulsarClientException.LookupException(cause.getMessage())); + } + onResponse(op); + } + }); } @Override - public synchronized void handleEndTxnOnTopicResponse(long requestId, CommandEndTxnOnPartitionResponse response) { - OpRequestSend op = pendingRequests.remove(requestId); + public void handleEndTxnOnTopicResponse(long requestId, CommandEndTxnOnPartitionResponse response) { + OpRequestSend op = outstandingRequests.remove(requestId); if (op == null) { if (log.isDebugEnabled()) { log.debug("Got end txn on topic response for timeout {} - {}", response.getTxnidMostBits(), @@ -167,25 +173,29 @@ public synchronized void handleEndTxnOnTopicResponse(long requestId, CommandEndT } return; } - - if (!response.hasError()) { - if (log.isDebugEnabled()) { - log.debug("[{}] Got end txn on topic response for for request {}", op.topic, response.getRequestId()); + try { + if (!response.hasError()) { + if (log.isDebugEnabled()) { + log.debug("[{}] Got end txn on topic response for for request {}", op.topic, + response.getRequestId()); + } + op.cb.complete(new TxnID(response.getTxnidMostBits(), response.getTxnidLeastBits())); + } else { + log.error("[{}] Got end txn on topic response for request {} error {}", op.topic, + response.getRequestId(), + response.getError()); + op.cb.completeExceptionally(ClientCnx.getPulsarClientException(response.getError(), + response.getMessage())); } - op.cb.complete(new TxnID(response.getTxnidMostBits(), response.getTxnidLeastBits())); - } else { - log.error("[{}] Got end txn on topic response for request {} error {}", op.topic, response.getRequestId(), - response.getError()); - cache.invalidate(op.topic); - op.cb.completeExceptionally(ClientCnx.getPulsarClientException(response.getError(), response.getMessage())); + } finally { + onResponse(op); } - onResponse(op); } @Override - public synchronized void 
handleEndTxnOnSubscriptionResponse(long requestId, + public void handleEndTxnOnSubscriptionResponse(long requestId, CommandEndTxnOnSubscriptionResponse response) { - OpRequestSend op = pendingRequests.remove(requestId); + OpRequestSend op = outstandingRequests.remove(requestId); if (op == null) { if (log.isDebugEnabled()) { log.debug("Got end txn on subscription response for timeout {} - {}", response.getTxnidMostBits(), @@ -194,87 +204,76 @@ public synchronized void handleEndTxnOnSubscriptionResponse(long requestId, return; } - if (!response.hasError()) { - if (log.isDebugEnabled()) { - log.debug("[{}] Got end txn on subscription response for for request {}", - op.topic, response.getRequestId()); - } - op.cb.complete(new TxnID(response.getTxnidMostBits(), response.getTxnidLeastBits())); - } else { - log.error("[{}] Got end txn on subscription response for request {} error {}", - op.topic, response.getRequestId(), response.getError()); - cache.invalidate(op.topic); - op.cb.completeExceptionally(ClientCnx.getPulsarClientException(response.getError(), response.getMessage())); - } - onResponse(op); - } - - private boolean canSendRequest(CompletableFuture callback) { try { - if (blockIfReachMaxPendingOps) { - semaphore.acquire(); - } else { - if (!semaphore.tryAcquire()) { - callback.completeExceptionally(new ReachMaxPendingOpsException("Reach max pending ops.")); - return false; + if (!response.hasError()) { + if (log.isDebugEnabled()) { + log.debug("[{}] Got end txn on subscription response for for request {}", + op.topic, response.getRequestId()); } + op.cb.complete(new TxnID(response.getTxnidMostBits(), response.getTxnidLeastBits())); + } else { + log.error("[{}] Got end txn on subscription response for request {} error {}", + op.topic, response.getRequestId(), response.getError()); + op.cb.completeExceptionally(ClientCnx.getPulsarClientException(response.getError(), + response.getMessage())); } - } catch (InterruptedException e) { - 
Thread.currentThread().interrupt(); - callback.completeExceptionally(TransactionBufferClientException.unwrap(e)); - return false; + } finally { + onResponse(op); } - return true; } - public synchronized void run(Timeout timeout) throws Exception { - if (timeout.isCancelled()) { - return; + public void onResponse(OpRequestSend op) { + REQUEST_CREDITS_UPDATER.incrementAndGet(this); + if (op != null) { + ReferenceCountUtil.safeRelease(op.cmd); + op.recycle(); } - long timeToWaitMs; - OpRequestSend peeked; - Map.Entry firstEntry = pendingRequests.firstEntry(); - peeked = firstEntry == null ? null : firstEntry.getValue(); - while (peeked != null && peeked.createdAt + operationTimeoutInMills - System.currentTimeMillis() <= 0) { - if (!peeked.cb.isDone()) { - peeked.cb.completeExceptionally(new TransactionBufferClientException.RequestTimeoutException()); - onResponse(peeked); + checkPendingRequests(); + } + + private void checkPendingRequests() { + while (true) { + int permits = REQUEST_CREDITS_UPDATER.get(this); + if (permits > 0 && pendingRequests.peek() != null) { + if (REQUEST_CREDITS_UPDATER.compareAndSet(this, permits, permits - 1)) { + OpRequestSend polled = pendingRequests.poll(); + if (polled != null) { + CompletableFuture clientCnx = getClientCnx(polled.topic); + if (polled.cnx != clientCnx) { + OpRequestSend invalid = polled; + polled = OpRequestSend.create(invalid.requestId, invalid.topic, invalid.cmd, invalid.cb, + clientCnx); + invalid.recycle(); + } + endTxn(polled); + } else { + REQUEST_CREDITS_UPDATER.incrementAndGet(this); + } + } } else { break; } - firstEntry = pendingRequests.firstEntry(); - pendingRequests.remove(pendingRequests.firstKey()); - peeked = firstEntry == null ? 
null : firstEntry.getValue(); - } - if (peeked == null) { - timeToWaitMs = operationTimeoutInMills; - } else { - timeToWaitMs = (peeked.createdAt + operationTimeoutInMills) - System.currentTimeMillis(); } - requestTimeout = timer.newTimeout(this, timeToWaitMs, TimeUnit.MILLISECONDS); - } - - void onResponse(OpRequestSend op) { - ReferenceCountUtil.safeRelease(op.byteBuf); - op.recycle(); - semaphore.release(); } - private static final class OpRequestSend { + public static final class OpRequestSend { long requestId; String topic; - ByteBuf byteBuf; + ByteBuf cmd; CompletableFuture cb; long createdAt; + CompletableFuture cnx; - static OpRequestSend create(long requestId, String topic, ByteBuf byteBuf, CompletableFuture cb) { + static OpRequestSend create(long requestId, String topic, ByteBuf cmd, CompletableFuture cb, + CompletableFuture cnx) { OpRequestSend op = RECYCLER.get(); op.requestId = requestId; op.topic = topic; - op.byteBuf = byteBuf; + op.cmd = cmd; op.cb = cb; op.createdAt = System.currentTimeMillis(); + op.cnx = cnx; return op; } @@ -296,12 +295,56 @@ protected OpRequestSend newObject(Handle handle) { }; } - private CompletableFuture getClientCnx(String topic) { - return ((PulsarClientImpl) pulsarClient).getConnection(topic); + public CompletableFuture getClientCnxWithLookup(String topic) { + return pulsarClient.getConnection(topic); + } + + public CompletableFuture getClientCnx(String topic) { + NamespaceService namespaceService = pulsarService.getNamespaceService(); + CompletableFuture nsBundle = namespaceService.getBundleAsync(TopicName.get(topic)); + return nsBundle + .thenCompose(bundle -> namespaceService.getOwnerAsync(bundle)) + .thenCompose(data -> { + if (data.isPresent()) { + NamespaceEphemeralData ephemeralData = data.get(); + try { + if (!ephemeralData.isDisabled()) { + URI uri; + if (pulsarClient.getConfiguration().isUseTls()) { + uri = new URI(ephemeralData.getNativeUrlTls()); + } else { + uri = new URI(ephemeralData.getNativeUrl()); + } + 
InetSocketAddress brokerAddress = + InetSocketAddress.createUnresolved(uri.getHost(), uri.getPort()); + return pulsarClient.getConnection(brokerAddress, brokerAddress); + } else { + // Bundle is unloading, lookup topic + return getClientCnxWithLookup(topic); + } + } catch (URISyntaxException e) { + // Should never go here + return getClientCnxWithLookup(topic); + } + } else { + // Bundle is not loaded yet, lookup topic + return getClientCnxWithLookup(topic); + } + }); } @Override public void close() { this.timer.stop(); } + + @Override + public int getAvailableRequestCredits() { + return REQUEST_CREDITS_UPDATER.get(this); + } + + @Override + public int getPendingRequestsCount() { + return pendingRequests.size(); + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/TransactionException.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/TransactionException.java new file mode 100644 index 0000000000000..f81fc8a724d5a --- /dev/null +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/TransactionException.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.transaction.exception; + +import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.transaction.coordinator.proto.TxnStatus; + +/** + * The base exception class for the errors thrown from Transaction. + */ +public abstract class TransactionException extends Exception { + + private static final long serialVersionUID = 0L; + + public TransactionException(String message) { + super(message); + } + + public TransactionException(String message, Throwable cause) { + super(message, cause); + } + + public TransactionException(Throwable cause) { + super(cause); + } + + /** + * Exception is thrown when opening a reader on a transaction that is not sealed yet. + */ + public static class TransactionNotSealedException extends TransactionException { + + private static final long serialVersionUID = 0L; + + public TransactionNotSealedException(String message) { + super(message); + } + } + + /** + * Exception thrown if a transaction is already sealed. + * + *

If a transaction is sealed, no more entries should be appended to this transaction. + */ + public static class TransactionSealedException extends TransactionException { + + private static final long serialVersionUID = 5366602873819540477L; + + public TransactionSealedException(String message) { + super(message); + } + } + + /** + * Exceptions are thrown when operations are applied to a transaction which is not in expected txn status. + */ + public static class TransactionStatusException extends TransactionException { + + private static final long serialVersionUID = 0L; + + public TransactionStatusException(TxnID txnId, + TxnStatus expectedStatus, + TxnStatus actualStatus) { + super("Transaction `q" + txnId + "` is not in an expected status `" + expectedStatus + + "`, but is in status `" + actualStatus + "`"); + } + } +} diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/buffer/TransactionBufferException.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/buffer/TransactionBufferException.java new file mode 100644 index 0000000000000..ab301c9017dda --- /dev/null +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/buffer/TransactionBufferException.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.transaction.exception.buffer; + +import org.apache.pulsar.broker.transaction.exception.TransactionException; + +/** + * The base exception class for the errors thrown from Transaction Buffer. + */ +public abstract class TransactionBufferException extends TransactionException { + + private static final long serialVersionUID = 0L; + + public TransactionBufferException(String message) { + super(message); + } + + public TransactionBufferException(String message, Throwable cause) { + super(message, cause); + } + + public TransactionBufferException(Throwable cause) { + super(cause); + } + + + /** + * Exception thrown when reaching end of a transaction. + */ + public static class EndOfTransactionException extends TransactionBufferException { + + private static final long serialVersionUID = 0L; + + public EndOfTransactionException(String message) { + super(message); + } + } + + /** + * Exception is thrown when no transactions found committed at a given ledger. + */ + public class NoTxnsCommittedAtLedgerException extends TransactionBufferException { + + private static final long serialVersionUID = 0L; + + public NoTxnsCommittedAtLedgerException(String message) { + super(message); + } + } + + /** + * Transaction buffer provider exception. + */ + public class TransactionBufferProviderException extends TransactionBufferException { + + public TransactionBufferProviderException(String message) { + super(message); + } + + } + + /** + * Exception is thrown when the transaction is not found in the transaction buffer. 
+ */ + public static class TransactionNotFoundException extends TransactionBufferException { + + private static final long serialVersionUID = 0L; + + public TransactionNotFoundException(String message) { + super(message); + } + } + + +} diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/package-info.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/buffer/package-info.java similarity index 93% rename from pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/package-info.java rename to pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/buffer/package-info.java index 2aee740f6d9b8..60bb5ee2a0245 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/package-info.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/buffer/package-info.java @@ -19,4 +19,4 @@ /** * Exceptions thrown when encountering errors in transaction buffer. */ -package org.apache.pulsar.broker.transaction.buffer.exceptions; +package org.apache.pulsar.broker.transaction.exception.buffer; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/coordinator/TransactionCoordinatorException.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/coordinator/TransactionCoordinatorException.java new file mode 100644 index 0000000000000..8da04ac174eb2 --- /dev/null +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/coordinator/TransactionCoordinatorException.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.transaction.exception.coordinator; + +import org.apache.pulsar.broker.transaction.exception.TransactionException; +import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.common.api.proto.TxnAction; + +/** + * The base exception class for the errors thrown from Transaction Coordinator. + */ +public abstract class TransactionCoordinatorException extends TransactionException { + + private static final long serialVersionUID = 0L; + + public TransactionCoordinatorException(String message) { + super(message); + } + + public TransactionCoordinatorException(String message, Throwable cause) { + super(message, cause); + } + + public TransactionCoordinatorException(Throwable cause) { + super(cause); + } + + + /** + * Exceptions are thrown when txnAction is unsupported. 
+ */ + public static class UnsupportedTxnActionException extends TransactionCoordinatorException { + + private static final long serialVersionUID = 0L; + + public UnsupportedTxnActionException(TxnID txnId, int txnAction) { + super("Transaction `" + txnId + "` receive unsupported txnAction " + TxnAction.valueOf(txnAction)); + } + } +} diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/coordinator/package-info.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/coordinator/package-info.java new file mode 100644 index 0000000000000..ceaff6ac80311 --- /dev/null +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/coordinator/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/** + * Exceptions thrown when encountering errors in transaction buffer. 
+ */ +package org.apache.pulsar.broker.transaction.exception.coordinator; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/package-info.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/package-info.java new file mode 100644 index 0000000000000..222d871178629 --- /dev/null +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/** + * Exceptions thrown when encountering errors in transaction buffer. 
+ */ +package org.apache.pulsar.broker.transaction.exception; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionNotFoundException.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/pendingack/TransactionPendingAckException.java similarity index 50% rename from pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionNotFoundException.java rename to pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/pendingack/TransactionPendingAckException.java index 0f1dc4696e4c1..74999bf1cb7ab 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionNotFoundException.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/pendingack/TransactionPendingAckException.java @@ -16,16 +16,37 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.pulsar.broker.transaction.buffer.exceptions; +package org.apache.pulsar.broker.transaction.exception.pendingack; + +import org.apache.pulsar.broker.transaction.exception.TransactionException; /** - * Exception is thrown when the transaction is not found in the transaction buffer. + * The base exception class for the errors thrown from Transaction Pending ACk. */ -public class TransactionNotFoundException extends TransactionBufferException { +public abstract class TransactionPendingAckException extends TransactionException { private static final long serialVersionUID = 0L; - public TransactionNotFoundException(String message) { + public TransactionPendingAckException(String message) { super(message); } + + public TransactionPendingAckException(String message, Throwable cause) { + super(message, cause); + } + + public TransactionPendingAckException(Throwable cause) { + super(cause); + } + + /** + * Transaction pending ack store provider exception. 
+ */ + public static class TransactionPendingAckStoreProviderException extends TransactionPendingAckException { + + public TransactionPendingAckStoreProviderException(String message) { + super(message); + } + + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/pendingack/package-info.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/pendingack/package-info.java new file mode 100644 index 0000000000000..0d0df63bd7673 --- /dev/null +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/exception/pendingack/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/** + * Exceptions thrown when encountering errors in transaction buffer. 
+ */ +package org.apache.pulsar.broker.transaction.exception.pendingack; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckHandle.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckHandle.java index 3664c5d046f6d..620db5c4b4814 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckHandle.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckHandle.java @@ -50,15 +50,13 @@ public interface PendingAckHandle { * * @param txnID {@link TxnID} TransactionID of an ongoing transaction trying to sck message. * @param positions {@link MutablePair} the pair of positions and these batch size. - * @param isInCacheRequest {@link Boolean} the boolean of the request in cache whether or not. * @return the future of this operation. * @throws TransactionConflictException if the ack with transaction is conflict with pending ack. * @throws NotAllowedException if Use this method incorrectly eg. not use * PositionImpl or cumulative ack with a list of positions. */ CompletableFuture individualAcknowledgeMessage(TxnID txnID, List> positions, boolean isInCacheRequest); - + Integer>> positions); /** * Acknowledge message(s) for an ongoing transaction. *

@@ -76,14 +74,12 @@ CompletableFuture individualAcknowledgeMessage(TxnID txnID, List cumulativeAcknowledgeMessage(TxnID txnID, List positions, - boolean isInCacheRequest); + CompletableFuture cumulativeAcknowledgeMessage(TxnID txnID, List positions); /** * Commit a transaction. @@ -92,11 +88,9 @@ CompletableFuture cumulativeAcknowledgeMessage(TxnID txnID, List commitTxn(TxnID txnID, Map properties, - long lowWaterMark, boolean isInCacheRequest); + CompletableFuture commitTxn(TxnID txnID, Map properties, long lowWaterMark); /** * Abort a transaction. @@ -104,10 +98,9 @@ CompletableFuture commitTxn(TxnID txnID, Map properties, * @param txnId {@link TxnID} to identify the transaction. * @param consumer {@link Consumer} which aborting transaction. * @param lowWaterMark the low water mark of this transaction - * @param isInCacheRequest {@link Boolean} the boolean of the request in cache whether or not. * @return the future of this operation. */ - CompletableFuture abortTxn(TxnID txnId, Consumer consumer, long lowWaterMark, boolean isInCacheRequest); + CompletableFuture abortTxn(TxnID txnId, Consumer consumer, long lowWaterMark); /** * Sync the position ack set, in order to clean up the cache of this position for pending ack handle. @@ -159,4 +152,9 @@ CompletableFuture commitTxn(TxnID txnID, Map properties, */ CompletableFuture close(); + /** + * Check if the PendingAckStore is init. + * @return if the PendingAckStore is init. 
+ */ + boolean checkIfPendingAckStoreInit(); } \ No newline at end of file diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckReplyCallBack.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckReplyCallBack.java index 3f2cc51f8ecb6..fed9add15657a 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckReplyCallBack.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckReplyCallBack.java @@ -36,4 +36,10 @@ public interface PendingAckReplyCallBack { * @param pendingAckMetadataEntry {@link PendingAckMetadataEntry} the metadata entry of pending ack */ void handleMetadataEntry(PendingAckMetadataEntry pendingAckMetadataEntry); + + /** + * Pending ack replay failed callback for pending ack store. + */ + void replayFailed(Throwable t); + } \ No newline at end of file diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckStore.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckStore.java index 3da676eb827d0..2f85d2430dbbd 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckStore.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckStore.java @@ -20,7 +20,7 @@ import java.util.List; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ExecutorService; import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.lang3.tuple.MutablePair; @@ -38,7 +38,7 @@ public interface PendingAckStore { * @param pendingAckHandle the handle of pending ack * @param executorService the replay executor service */ - void replayAsync(PendingAckHandleImpl pendingAckHandle, ScheduledExecutorService 
executorService); + void replayAsync(PendingAckHandleImpl pendingAckHandle, ExecutorService executorService); /** * Close the transaction pending ack store. diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/InMemoryPendingAckStore.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/InMemoryPendingAckStore.java index d882c80c47863..44c9fbe039b0b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/InMemoryPendingAckStore.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/InMemoryPendingAckStore.java @@ -20,7 +20,7 @@ import java.util.List; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ExecutorService; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.lang3.tuple.MutablePair; import org.apache.pulsar.broker.transaction.pendingack.PendingAckStore; @@ -33,7 +33,7 @@ public class InMemoryPendingAckStore implements PendingAckStore { @Override - public void replayAsync(PendingAckHandleImpl pendingAckHandle, ScheduledExecutorService scheduledExecutorService) { + public void replayAsync(PendingAckHandleImpl pendingAckHandle, ExecutorService scheduledExecutorService) { pendingAckHandle.changeToReadyState(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckReplyCallBack.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckReplyCallBack.java index 6bcc344ddf392..53de549f69fcd 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckReplyCallBack.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckReplyCallBack.java @@ -23,6 +23,7 @@ import java.util.List; import 
org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.lang3.tuple.MutablePair; +import org.apache.pulsar.broker.service.BrokerServiceException; import org.apache.pulsar.broker.transaction.pendingack.PendingAckReplyCallBack; import org.apache.pulsar.broker.transaction.pendingack.proto.PendingAckMetadata; import org.apache.pulsar.broker.transaction.pendingack.proto.PendingAckMetadataEntry; @@ -44,7 +45,7 @@ public MLPendingAckReplyCallBack(PendingAckHandleImpl pendingAckHandle) { @Override public void replayComplete() { - synchronized (pendingAckHandle) { + pendingAckHandle.getInternalPinnedExecutor().execute(() -> { log.info("Topic name : [{}], SubName : [{}] pending ack state reply success!", pendingAckHandle.getTopicName(), pendingAckHandle.getSubName()); @@ -53,11 +54,20 @@ public void replayComplete() { log.info("Topic name : [{}], SubName : [{}] pending ack handle cache request success!", pendingAckHandle.getTopicName(), pendingAckHandle.getSubName()); } else { - log.error("Topic name : [{}], SubName : [{}] pending ack state reply fail!", - pendingAckHandle.getTopicName(), pendingAckHandle.getSubName()); + log.error("Topic name : [{}], SubName : [{}] pending ack state reply fail! 
current state: {}", + pendingAckHandle.getTopicName(), pendingAckHandle.getSubName(), pendingAckHandle.state); + replayFailed(new BrokerServiceException.ServiceUnitNotReadyException("Failed" + + " to change PendingAckHandle state to Ready, current state is : " + pendingAckHandle.state)); } + pendingAckHandle.handleCacheRequest(); + }); + } + + @Override + public void replayFailed(Throwable t) { + synchronized (pendingAckHandle) { + pendingAckHandle.exceptionHandleFuture(t); } - pendingAckHandle.handleCacheRequest(); } @Override diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStore.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStore.java index b60f4ba21dc29..af4e664b1e33d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStore.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStore.java @@ -26,7 +26,7 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicLong; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.Entry; @@ -34,7 +34,6 @@ import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.Position; -import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.lang3.tuple.MutablePair; import org.apache.pulsar.broker.service.BrokerServiceException.PersistenceException; @@ -43,6 +42,7 @@ import org.apache.pulsar.broker.transaction.pendingack.proto.PendingAckMetadata; import 
org.apache.pulsar.broker.transaction.pendingack.proto.PendingAckMetadataEntry; import org.apache.pulsar.broker.transaction.pendingack.proto.PendingAckOp; +import org.apache.pulsar.broker.transaction.util.LogIndexLagBackoff; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.api.proto.CommandAck.AckType; @@ -73,37 +73,44 @@ public class MLPendingAckStore implements PendingAckStore { private PositionImpl currentLoadPosition; + private final AtomicLong currentIndexLag = new AtomicLong(0); + private volatile long maxIndexLag; + + protected PositionImpl maxAckPosition = PositionImpl.earliest; + private final LogIndexLagBackoff logIndexBackoff; + /** * The map is for pending ack store clear useless data. *

- * When ack message append to pending ack store, it will store the position which is persistent as key. + * key:the largest ack position of origin topic, corresponds to the value position. *

- * When ack message append to pending ack store, it will store the position which is the max position of this - * ack by the original topic as value. + * value:the position persistent by pendingAck log. *

- * It will judge the position with the max sub cursor position whether smaller than the subCursor mark + * It will judge the position with the max sub cursor position (key) whether smaller than the subCursor mark * delete position. *

- * If the max position is smaller than the subCursor mark delete position, the log cursor will mark delete - * the position. + * If the max position (key) is smaller than the subCursor mark delete position, + * the log cursor will mark delete the position before log position (value). */ - private final ConcurrentSkipListMap metadataPositions; + private final ConcurrentSkipListMap pendingAckLogIndex; private final ManagedCursor subManagedCursor; public MLPendingAckStore(ManagedLedger managedLedger, ManagedCursor cursor, - ManagedCursor subManagedCursor) { + ManagedCursor subManagedCursor, long transactionPendingAckLogIndexMinLag) { this.managedLedger = managedLedger; this.cursor = cursor; this.currentLoadPosition = (PositionImpl) this.cursor.getMarkDeletedPosition(); this.entryQueue = new SpscArrayQueue<>(2000); this.lastConfirmedEntry = (PositionImpl) managedLedger.getLastConfirmedEntry(); - this.metadataPositions = new ConcurrentSkipListMap<>(); + this.pendingAckLogIndex = new ConcurrentSkipListMap<>(); this.subManagedCursor = subManagedCursor; + this.logIndexBackoff = new LogIndexLagBackoff(transactionPendingAckLogIndexMinLag, Long.MAX_VALUE, 1); + this.maxIndexLag = logIndexBackoff.next(0); } @Override - public void replayAsync(PendingAckHandleImpl pendingAckHandle, ScheduledExecutorService transactionReplayExecutor) { + public void replayAsync(PendingAckHandleImpl pendingAckHandle, ExecutorService transactionReplayExecutor) { transactionReplayExecutor .execute(new PendingAckReplay(new MLPendingAckReplyCallBack(pendingAckHandle))); } @@ -120,12 +127,23 @@ public CompletableFuture closeAsync() { cursor.asyncClose(new AsyncCallbacks.CloseCallback() { @Override public void closeComplete(Object ctx) { - try { - managedLedger.close(); - } catch (Exception e) { - completableFuture.completeExceptionally(e); - } - completableFuture.complete(null); + managedLedger.asyncClose(new AsyncCallbacks.CloseCallback() { + + @Override + public void closeComplete(Object ctx) { + 
if (log.isDebugEnabled()) { + log.debug("[{}][{}] MLPendingAckStore closed successfully!", managedLedger.getName(), ctx); + } + completableFuture.complete(null); + } + + @Override + public void closeFailed(ManagedLedgerException exception, Object ctx) { + log.error("[{}][{}] MLPendingAckStore closed failed,exception={}", managedLedger.getName(), + ctx, exception); + completableFuture.completeExceptionally(exception); + } + }, ctx); } @Override @@ -209,69 +227,22 @@ public void addComplete(Position position, ByteBuf entryData, Object ctx) { log.debug("[{}][{}] MLPendingAckStore message append success at {} txnId: {}, operation : {}", managedLedger.getName(), ctx, position, txnID, pendingAckMetadataEntry.getPendingAckOp()); } - // store the persistent position in to memory - if (pendingAckMetadataEntry.getPendingAckOp() != PendingAckOp.ABORT - && pendingAckMetadataEntry.getPendingAckOp() != PendingAckOp.COMMIT) { - Optional optional = pendingAckMetadataEntry.getPendingAckMetadatasList() - .stream().max((o1, o2) -> ComparisonChain.start().compare(o1.getLedgerId(), - o2.getLedgerId()).compare(o1.getEntryId(), o2.getEntryId()).result()); - optional.ifPresent(pendingAckMetadata -> - metadataPositions.compute((PositionImpl) position, (thisPosition, otherPosition) -> { - PositionImpl nowPosition = PositionImpl.get(pendingAckMetadata.getLedgerId(), - pendingAckMetadata.getEntryId()); - if (otherPosition == null) { - return nowPosition; - } else { - return nowPosition.compareTo(otherPosition) > 0 ? 
nowPosition : otherPosition; - } - })); - } - + currentIndexLag.incrementAndGet(); + handleMetadataEntry((PositionImpl) position, pendingAckMetadataEntry); buf.release(); completableFuture.complete(null); - if (!metadataPositions.isEmpty()) { - PositionImpl firstPosition = metadataPositions.firstEntry().getKey(); - PositionImpl deletePosition = metadataPositions.firstEntry().getKey(); - while (!metadataPositions.isEmpty() - && metadataPositions.firstKey() != null - && subManagedCursor.getPersistentMarkDeletedPosition() != null - && metadataPositions.firstEntry().getValue() - .compareTo((PositionImpl) subManagedCursor.getPersistentMarkDeletedPosition()) <= 0) { - deletePosition = metadataPositions.firstKey(); - metadataPositions.remove(metadataPositions.firstKey()); - } - - if (firstPosition != deletePosition) { - PositionImpl finalDeletePosition = deletePosition; - cursor.asyncMarkDelete(deletePosition, - new AsyncCallbacks.MarkDeleteCallback() { - @Override - public void markDeleteComplete(Object ctx) { - if (log.isDebugEnabled()) { - log.debug("[{}] Transaction pending ack store mark delete position : " - + "[{}] success", managedLedger.getName(), - finalDeletePosition); - } - } - - @Override - public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { - if (log.isDebugEnabled()) { - log.error("[{}] Transaction pending ack store mark delete position : " - + "[{}] fail!", managedLedger.getName(), - finalDeletePosition, exception); - } - } - }, null); - } - } + clearUselessLogData(); } @Override public void addFailed(ManagedLedgerException exception, Object ctx) { log.error("[{}][{}] MLPendingAckStore message append fail exception : {}, operation : {}", managedLedger.getName(), ctx, exception, pendingAckMetadataEntry.getPendingAckOp()); + + if (exception instanceof ManagedLedgerException.ManagedLedgerAlreadyClosedException) { + managedLedger.readyToCreateNewLedger(); + } buf.release(); completableFuture.completeExceptionally(new 
PersistenceException(exception)); } @@ -279,6 +250,68 @@ public void addFailed(ManagedLedgerException exception, Object ctx) { return completableFuture; } + private void handleMetadataEntry(PositionImpl logPosition, PendingAckMetadataEntry pendingAckMetadataEntry) { + // store the persistent position in to memory + // store the max position of this entry retain + if (pendingAckMetadataEntry.getPendingAckOp() != PendingAckOp.ABORT + && pendingAckMetadataEntry.getPendingAckOp() != PendingAckOp.COMMIT) { + Optional optional = pendingAckMetadataEntry.getPendingAckMetadatasList() + .stream().max((o1, o2) -> ComparisonChain.start().compare(o1.getLedgerId(), + o2.getLedgerId()).compare(o1.getEntryId(), o2.getEntryId()).result()); + + optional.ifPresent(pendingAckMetadata -> { + PositionImpl nowPosition = PositionImpl.get(pendingAckMetadata.getLedgerId(), + pendingAckMetadata.getEntryId()); + + if (nowPosition.compareTo(maxAckPosition) > 0) { + maxAckPosition = nowPosition; + } + if (currentIndexLag.get() >= maxIndexLag) { + pendingAckLogIndex.compute(maxAckPosition, + (thisPosition, otherPosition) -> logPosition); + maxIndexLag = logIndexBackoff.next(pendingAckLogIndex.size()); + currentIndexLag.set(0); + } + }); + } + } + + private void clearUselessLogData() { + if (!pendingAckLogIndex.isEmpty()) { + PositionImpl deletePosition = null; + while (!pendingAckLogIndex.isEmpty() + && pendingAckLogIndex.firstKey() != null + && subManagedCursor.getPersistentMarkDeletedPosition() != null + && pendingAckLogIndex.firstEntry().getKey() + .compareTo((PositionImpl) subManagedCursor.getPersistentMarkDeletedPosition()) <= 0) { + deletePosition = pendingAckLogIndex.remove(pendingAckLogIndex.firstKey()); + } + + if (deletePosition != null) { + maxIndexLag = logIndexBackoff.next(pendingAckLogIndex.size()); + PositionImpl finalDeletePosition = deletePosition; + cursor.asyncMarkDelete(deletePosition, + new AsyncCallbacks.MarkDeleteCallback() { + @Override + public void 
markDeleteComplete(Object ctx) { + if (log.isDebugEnabled()) { + log.debug("[{}] Transaction pending ack store mark delete position : " + + "[{}] success", managedLedger.getName(), + finalDeletePosition); + } + } + + @Override + public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { + log.error("[{}] Transaction pending ack store mark delete position : " + + "[{}] fail!", managedLedger.getName(), + finalDeletePosition, exception); + } + }, null); + } + } + } + class PendingAckReplay implements Runnable { private final FillEntryQueueCallback fillEntryQueueCallback; @@ -292,43 +325,26 @@ class PendingAckReplay implements Runnable { @Override public void run() { try { - while (lastConfirmedEntry.compareTo(currentLoadPosition) > 0) { - if (((ManagedCursorImpl) cursor).isClosed()) { - log.warn("[{}] MLPendingAckStore cursor have been closed, close replay thread.", - cursor.getManagedLedger().getName()); - return; - } - fillEntryQueueCallback.fillQueue(); + if (cursor.isClosed()) { + pendingAckReplyCallBack.replayFailed(new ManagedLedgerException + .CursorAlreadyClosedException("MLPendingAckStore cursor have been closed.")); + log.warn("[{}] MLPendingAckStore cursor have been closed, close replay thread.", + cursor.getManagedLedger().getName()); + return; + } + while (lastConfirmedEntry.compareTo(currentLoadPosition) > 0 && fillEntryQueueCallback.fillQueue()) { Entry entry = entryQueue.poll(); if (entry != null) { ByteBuf buffer = entry.getDataBuffer(); currentLoadPosition = PositionImpl.get(entry.getLedgerId(), entry.getEntryId()); PendingAckMetadataEntry pendingAckMetadataEntry = new PendingAckMetadataEntry(); pendingAckMetadataEntry.parseFrom(buffer, buffer.readableBytes()); - // store the persistent position in to memory - // store the max position of this entry retain - if (pendingAckMetadataEntry.getPendingAckOp() != PendingAckOp.ABORT - && pendingAckMetadataEntry.getPendingAckOp() != PendingAckOp.COMMIT) { - Optional optional = 
pendingAckMetadataEntry.getPendingAckMetadatasList() - .stream().max((o1, o2) -> ComparisonChain.start().compare(o1.getLedgerId(), - o2.getLedgerId()).compare(o1.getEntryId(), o2.getEntryId()).result()); - - optional.ifPresent(pendingAckMetadata -> - metadataPositions.compute(PositionImpl.get(entry.getLedgerId(), entry.getEntryId()), - (thisPosition, otherPosition) -> { - PositionImpl nowPosition = PositionImpl - .get(pendingAckMetadata.getLedgerId(), - pendingAckMetadata.getEntryId()); - if (otherPosition == null) { - return nowPosition; - } else { - return nowPosition.compareTo(otherPosition) > 0 ? nowPosition - : otherPosition; - } - })); - } + currentIndexLag.incrementAndGet(); + handleMetadataEntry(new PositionImpl(entry.getLedgerId(), entry.getEntryId()), + pendingAckMetadataEntry); pendingAckReplyCallBack.handleMetadataEntry(pendingAckMetadataEntry); entry.release(); + clearUselessLogData(); } else { try { Thread.sleep(1); @@ -341,6 +357,7 @@ public void run() { } } } catch (Exception e) { + pendingAckReplyCallBack.replayFailed(e); log.error("[{}] Pending ack recover fail!", subManagedCursor.getManagedLedger().getName(), e); return; } @@ -350,15 +367,19 @@ public void run() { class FillEntryQueueCallback implements AsyncCallbacks.ReadEntriesCallback { + private volatile boolean isReadable = true; private final AtomicLong outstandingReadsRequests = new AtomicLong(0); + private static final int NUMBER_OF_PER_READ_ENTRY = 100; - void fillQueue() { - if (entryQueue.size() < entryQueue.capacity() && outstandingReadsRequests.get() == 0) { + boolean fillQueue() { + if (entryQueue.size() + NUMBER_OF_PER_READ_ENTRY < entryQueue.capacity() + && outstandingReadsRequests.get() == 0) { if (cursor.hasMoreEntries()) { outstandingReadsRequests.incrementAndGet(); - readAsync(100, this); + readAsync(NUMBER_OF_PER_READ_ENTRY, this); } } + return isReadable; } @Override @@ -378,7 +399,13 @@ public Entry get() { @Override public void readEntriesFailed(ManagedLedgerException 
exception, Object ctx) { - log.error("MLPendingAckStore stat reply fail!", exception); + if (managedLedger.getConfig().isAutoSkipNonRecoverableData() + && exception instanceof ManagedLedgerException.NonRecoverableLedgerException + || exception instanceof ManagedLedgerException.ManagedLedgerFencedException + || exception instanceof ManagedLedgerException.CursorAlreadyClosedException) { + isReadable = false; + } + log.error("MLPendingAckStore of topic [{}] stat reply fail!", managedLedger.getName(), exception); outstandingReadsRequests.decrementAndGet(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java index 548dd8a434a90..6b84d6e329a3e 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/MLPendingAckStoreProvider.java @@ -26,9 +26,9 @@ import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.broker.transaction.exception.pendingack.TransactionPendingAckException; import org.apache.pulsar.broker.transaction.pendingack.PendingAckStore; import org.apache.pulsar.broker.transaction.pendingack.TransactionPendingAckStoreProvider; -import org.apache.pulsar.broker.transaction.pendingack.exceptions.TransactionPendingAckStoreProviderException; import org.apache.pulsar.common.api.proto.CommandSubscribe.InitialPosition; import org.apache.pulsar.common.naming.TopicName; @@ -45,7 +45,8 @@ public CompletableFuture newPendingAckStore(PersistentSubscript if (subscription == null) { pendingAckStoreFuture.completeExceptionally( - new 
TransactionPendingAckStoreProviderException("The subscription is null.")); + new TransactionPendingAckException + .TransactionPendingAckStoreProviderException("The subscription is null.")); return pendingAckStoreFuture; } PersistentTopic originPersistentTopic = (PersistentTopic) subscription.getTopic(); @@ -73,9 +74,14 @@ public void openLedgerComplete(ManagedLedger ledger, Object ctx) { InitialPosition.Earliest, new AsyncCallbacks.OpenCursorCallback() { @Override public void openCursorComplete(ManagedCursor cursor, Object ctx) { - pendingAckStoreFuture - .complete(new MLPendingAckStore(ledger, cursor, - subscription.getCursor())); + pendingAckStoreFuture.complete(new MLPendingAckStore(ledger, + cursor, + subscription.getCursor(), + originPersistentTopic + .getBrokerService() + .getPulsar() + .getConfiguration() + .getTransactionPendingAckLogIndexMinLag())); if (log.isDebugEnabled()) { log.debug("{},{} open MLPendingAckStore cursor success", originPersistentTopic.getName(), diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/PendingAckHandleDisabled.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/PendingAckHandleDisabled.java index cf6b5c82366a7..119072700dfaa 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/PendingAckHandleDisabled.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/PendingAckHandleDisabled.java @@ -42,26 +42,22 @@ public class PendingAckHandleDisabled implements PendingAckHandle { @Override public CompletableFuture individualAcknowledgeMessage(TxnID txnID, - List> positions, - boolean isInCacheRequest) { + List> positions) { return FutureUtil.failedFuture(new NotAllowedException("The transaction is disabled")); } @Override - public CompletableFuture cumulativeAcknowledgeMessage(TxnID txnID, List positions, - boolean isInCacheRequest) { + public CompletableFuture 
cumulativeAcknowledgeMessage(TxnID txnID, List positions) { return FutureUtil.failedFuture(new NotAllowedException("The transaction is disabled")); } @Override - public CompletableFuture commitTxn(TxnID txnID, Map properties, long lowWaterMark, - boolean isInCacheRequest) { + public CompletableFuture commitTxn(TxnID txnID, Map properties, long lowWaterMark) { return FutureUtil.failedFuture(new NotAllowedException("The transaction is disabled")); } @Override - public CompletableFuture abortTxn(TxnID txnId, Consumer consumer, long lowWaterMark, - boolean isInCacheRequest) { + public CompletableFuture abortTxn(TxnID txnId, Consumer consumer, long lowWaterMark) { return FutureUtil.failedFuture(new NotAllowedException("The transaction is disabled")); } @@ -99,4 +95,9 @@ public TransactionPendingAckStats getStats() { public CompletableFuture close() { return CompletableFuture.completedFuture(null); } + + @Override + public boolean checkIfPendingAckStoreInit() { + return false; + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/PendingAckHandleImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/PendingAckHandleImpl.java index 78bab966b92e9..41ef25b3e4d65 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/PendingAckHandleImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/pendingack/impl/PendingAckHandleImpl.java @@ -22,14 +22,19 @@ import static org.apache.bookkeeper.mledger.util.PositionAckSetUtil.compareToWithAckSet; import static org.apache.bookkeeper.mledger.util.PositionAckSetUtil.isAckSetOverlap; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import 
java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.Semaphore; +import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.bookkeeper.mledger.Position; @@ -42,7 +47,6 @@ import org.apache.pulsar.broker.service.BrokerServiceException.ServiceUnitNotReadyException; import org.apache.pulsar.broker.service.Consumer; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; -import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.transaction.pendingack.PendingAckHandle; import org.apache.pulsar.broker.transaction.pendingack.PendingAckStore; import org.apache.pulsar.broker.transaction.pendingack.TransactionPendingAckStoreProvider; @@ -111,13 +115,30 @@ public class PendingAckHandleImpl extends PendingAckHandleState implements Pendi private final BlockingQueue acceptQueue = new LinkedBlockingDeque<>(); + /** + * The map is used to store the lowWaterMarks which key is TC ID and value is lowWaterMark of the TC. 
+ */ + private final ConcurrentHashMap lowWaterMarks = new ConcurrentHashMap<>(); + + private final Semaphore handleLowWaterMark = new Semaphore(1); + + @Getter + private final ExecutorService internalPinnedExecutor; + + public PendingAckHandleImpl(PersistentSubscription persistentSubscription) { super(State.None); this.topicName = persistentSubscription.getTopicName(); this.subName = persistentSubscription.getName(); this.persistentSubscription = persistentSubscription; - - this.pendingAckStoreProvider = ((PersistentTopic) this.persistentSubscription.getTopic()) + internalPinnedExecutor = persistentSubscription + .getTopic() + .getBrokerService() + .getPulsar() + .getTransactionExecutorProvider() + .getExecutor(this); + + this.pendingAckStoreProvider = this.persistentSubscription.getTopic() .getBrokerService().getPulsar().getTransactionPendingAckStoreProvider(); pendingAckStoreProvider.checkInitializedBefore(persistentSubscription).thenAccept(init -> { if (init) { @@ -130,21 +151,18 @@ public PendingAckHandleImpl(PersistentSubscription persistentSubscription) { private void initPendingAckStore() { if (changeToInitializingState()) { - synchronized (PendingAckHandleImpl.this) { - if (!checkIfClose()) { - this.pendingAckStoreFuture = - pendingAckStoreProvider.newPendingAckStore(persistentSubscription); - this.pendingAckStoreFuture.thenAccept(pendingAckStore -> { - pendingAckStore.replayAsync(this, - ((PersistentTopic) persistentSubscription.getTopic()).getBrokerService() - .getPulsar().getTransactionReplayExecutor()); - }).exceptionally(e -> { - acceptQueue.clear(); - changeToErrorState(); - log.error("PendingAckHandleImpl init fail! 
TopicName : {}, SubName: {}", topicName, subName, e); - return null; - }); - } + if (!checkIfClose()) { + this.pendingAckStoreFuture = + pendingAckStoreProvider.newPendingAckStore(persistentSubscription); + this.pendingAckStoreFuture.thenAccept(pendingAckStore -> { + pendingAckStore.replayAsync(this, internalPinnedExecutor); + }).exceptionally(e -> { + acceptQueue.clear(); + changeToErrorState(); + log.error("PendingAckHandleImpl init fail! TopicName : {}, SubName: {}", topicName, subName, e); + exceptionHandleFuture(e.getCause()); + return null; + }); } } } @@ -152,50 +170,20 @@ private void initPendingAckStore() { private void addIndividualAcknowledgeMessageRequest(TxnID txnID, List> positions, CompletableFuture completableFuture) { - acceptQueue.add(() -> individualAcknowledgeMessage(txnID, positions, true).thenAccept(v -> - completableFuture.complete(null)).exceptionally(e -> { - completableFuture.completeExceptionally(e); - return null; - })); + acceptQueue.add(() -> internalIndividualAcknowledgeMessage(txnID, positions, completableFuture)); } - @Override - public CompletableFuture individualAcknowledgeMessage(TxnID txnID, - List> positions, - boolean isInCacheRequest) { - - if (!checkIfReady()) { - CompletableFuture completableFuture = new CompletableFuture<>(); - synchronized (PendingAckHandleImpl.this) { - switch (state) { - case Initializing: - addIndividualAcknowledgeMessageRequest(txnID, positions, completableFuture); - return completableFuture; - case None: - addIndividualAcknowledgeMessageRequest(txnID, positions, completableFuture); - initPendingAckStore(); - return completableFuture; - case Error: - completableFuture.completeExceptionally( - new ServiceUnitNotReadyException("PendingAckHandle not replay error!")); - return completableFuture; - case Close: - completableFuture.completeExceptionally( - new ServiceUnitNotReadyException("PendingAckHandle have been closed!")); - return completableFuture; - default: - break; - } - } - } - + public void 
internalIndividualAcknowledgeMessage(TxnID txnID, List> positions, + CompletableFuture completableFuture) { if (txnID == null) { - return FutureUtil.failedFuture(new NotAllowedException("TransactionID can not be null.")); + completableFuture.completeExceptionally(new NotAllowedException("Positions can not be null.")); + return; + } if (positions == null) { - return FutureUtil.failedFuture(new NotAllowedException("Positions can not be null.")); + completableFuture.completeExceptionally(new NotAllowedException("Positions can not be null.")); + return; } - CompletableFuture completableFuture = new CompletableFuture<>(); this.pendingAckStoreFuture.thenAccept(pendingAckStore -> pendingAckStore.appendIndividualAck(txnID, positions).thenAccept(v -> { @@ -288,67 +276,67 @@ && isAckSetOverlap(individualAckPositions completableFuture.completeExceptionally(e); return null; }); - return completableFuture; - } - - private void addCumulativeAcknowledgeMessageRequest(TxnID txnID, - List positions, - CompletableFuture completableFuture) { - acceptQueue.add(() -> cumulativeAcknowledgeMessage(txnID, positions, true).thenAccept(v -> - completableFuture.complete(null)).exceptionally(e -> { - completableFuture.completeExceptionally(e); - return null; - })); } @Override - public CompletableFuture cumulativeAcknowledgeMessage(TxnID txnID, - List positions, - boolean isInCacheRequest) { - if (!checkIfReady()) { - CompletableFuture completableFuture = new CompletableFuture<>(); - synchronized (PendingAckHandleImpl.this) { + public CompletableFuture individualAcknowledgeMessage(TxnID txnID, + List> positions) { + CompletableFuture completableFuture = new CompletableFuture<>(); + internalPinnedExecutor.execute(() -> { + if (!checkIfReady()) { switch (state) { case Initializing: - addCumulativeAcknowledgeMessageRequest(txnID, positions, completableFuture); - return completableFuture; + addIndividualAcknowledgeMessageRequest(txnID, positions, completableFuture); + return; case None: - 
addCumulativeAcknowledgeMessageRequest(txnID, positions, completableFuture); + addIndividualAcknowledgeMessageRequest(txnID, positions, completableFuture); initPendingAckStore(); - return completableFuture; + return; case Error: completableFuture.completeExceptionally( new ServiceUnitNotReadyException("PendingAckHandle not replay error!")); - return completableFuture; + return; case Close: completableFuture.completeExceptionally( new ServiceUnitNotReadyException("PendingAckHandle have been closed!")); - return completableFuture; + return; default: break; - } } - } + internalIndividualAcknowledgeMessage(txnID, positions, completableFuture); + }); + return completableFuture; + } + + private void addCumulativeAcknowledgeMessageRequest(TxnID txnID, + List positions, + CompletableFuture completableFuture) { + acceptQueue.add(() -> internalCumulativeAcknowledgeMessage(txnID, positions, completableFuture)); + } + public void internalCumulativeAcknowledgeMessage(TxnID txnID, + List positions, + CompletableFuture completableFuture) { if (txnID == null) { - return FutureUtil.failedFuture(new NotAllowedException("TransactionID can not be null.")); + completableFuture.completeExceptionally(new NotAllowedException("TransactionID can not be null.")); + return; } if (positions == null) { - return FutureUtil.failedFuture(new NotAllowedException("Positions can not be null.")); + completableFuture.completeExceptionally(new NotAllowedException("Positions can not be null.")); + return; } if (positions.size() != 1) { String errorMsg = "[" + topicName + "][" + subName + "] Transaction:" + txnID + " invalid cumulative ack received with multiple message ids."; log.error(errorMsg); - return FutureUtil.failedFuture(new NotAllowedException(errorMsg)); + completableFuture.completeExceptionally(new NotAllowedException(errorMsg)); + return; } PositionImpl position = positions.get(0); - CompletableFuture completableFuture = new CompletableFuture<>(); - 
this.pendingAckStoreFuture.thenAccept(pendingAckStore -> pendingAckStore.appendCumulativeAck(txnID, position).thenAccept(v -> { if (log.isDebugEnabled()) { @@ -391,59 +379,46 @@ public CompletableFuture cumulativeAcknowledgeMessage(TxnID txnID, completableFuture.completeExceptionally(e); return null; }); - return completableFuture; - } - - private void addCommitTxnRequest(TxnID txnId, Map properties, long lowWaterMark, - CompletableFuture completableFuture) { - acceptQueue.add(() -> commitTxn(txnId, properties, lowWaterMark, true).thenAccept(v -> - completableFuture.complete(null)).exceptionally(e -> { - completableFuture.completeExceptionally(e); - return null; - })); } @Override - public synchronized CompletableFuture commitTxn(TxnID txnID, Map properties, - long lowWaterMark, boolean isInCacheRequest) { - if (!checkIfReady()) { - synchronized (PendingAckHandleImpl.this) { - if (state == State.Initializing) { - CompletableFuture completableFuture = new CompletableFuture<>(); - addCommitTxnRequest(txnID, properties, lowWaterMark, completableFuture); - return completableFuture; - } else if (state == State.None) { - CompletableFuture completableFuture = new CompletableFuture<>(); - addCommitTxnRequest(txnID, properties, lowWaterMark, completableFuture); - initPendingAckStore(); - return completableFuture; - } else if (checkIfReady()) { - - } else { - if (state == State.Error) { - return FutureUtil.failedFuture( + public CompletableFuture cumulativeAcknowledgeMessage(TxnID txnID, List positions) { + CompletableFuture completableFuture = new CompletableFuture<>(); + internalPinnedExecutor.execute(() -> { + if (!checkIfReady()) { + switch (state) { + case Initializing: + addCumulativeAcknowledgeMessageRequest(txnID, positions, completableFuture); + return; + case None: + addCumulativeAcknowledgeMessageRequest(txnID, positions, completableFuture); + initPendingAckStore(); + return; + case Error: + completableFuture.completeExceptionally( new 
ServiceUnitNotReadyException("PendingAckHandle not replay error!")); - } else { - return FutureUtil.failedFuture( + return; + case Close: + completableFuture.completeExceptionally( new ServiceUnitNotReadyException("PendingAckHandle have been closed!")); - } - + return; + default: + break; } } - } + internalCumulativeAcknowledgeMessage(txnID, positions, completableFuture); + }); - if (!acceptQueue.isEmpty() && !isInCacheRequest) { - synchronized (PendingAckHandleImpl.this) { - if (!acceptQueue.isEmpty()) { - CompletableFuture completableFuture = new CompletableFuture<>(); - addCommitTxnRequest(txnID, properties, lowWaterMark, completableFuture); - return completableFuture; - } - } - } + return completableFuture; + } - CompletableFuture commitFuture = new CompletableFuture<>(); + private void addCommitTxnRequest(TxnID txnId, Map properties, long lowWaterMark, + CompletableFuture completableFuture) { + acceptQueue.add(() -> internalCommitTxn(txnId, properties, lowWaterMark, completableFuture)); + } + private void internalCommitTxn(TxnID txnID, Map properties, long lowWaterMark, + CompletableFuture commitFuture) { // It's valid to create transaction then commit without doing any operation, which will cause // pendingAckMessagesMap to be null. 
if (this.cumulativeAckOfTransaction != null) { @@ -499,58 +474,44 @@ public synchronized CompletableFuture commitTxn(TxnID txnID, Map completableFuture) { - acceptQueue.add(() -> abortTxn(txnId, consumer, lowWaterMark, true).thenAccept(v -> - completableFuture.complete(null)).exceptionally(e -> { - completableFuture.completeExceptionally(e); - return null; - })); } @Override - public synchronized CompletableFuture abortTxn(TxnID txnId, Consumer consumer, - long lowWaterMark, boolean isInCacheRequest) { - if (!checkIfReady()) { - synchronized (PendingAckHandleImpl.this) { - if (state == State.Initializing) { - CompletableFuture completableFuture = new CompletableFuture<>(); - addAbortTxnRequest(txnId, consumer, lowWaterMark, completableFuture); - return completableFuture; - } else if (state == State.None) { - CompletableFuture completableFuture = new CompletableFuture<>(); - addAbortTxnRequest(txnId, consumer, lowWaterMark, completableFuture); - initPendingAckStore(); - return completableFuture; - } else if (checkIfReady()) { - - } else { - if (state == State.Error) { - return FutureUtil.failedFuture( - new ServiceUnitNotReadyException("PendingAckHandle not replay error!")); - } else { - return FutureUtil.failedFuture( - new ServiceUnitNotReadyException("PendingAckHandle have been closed!")); - } + public CompletableFuture commitTxn(TxnID txnID, Map properties, long lowWaterMark) { + CompletableFuture commitFuture = new CompletableFuture<>(); + internalPinnedExecutor.execute(() -> { + if (!checkIfReady()) { + switch (state) { + case Initializing: + addCommitTxnRequest(txnID, properties, lowWaterMark, commitFuture); + return; + case None: + addCommitTxnRequest(txnID, properties, lowWaterMark, commitFuture); + initPendingAckStore(); + return; + case Error: + if (state == State.Error) { + commitFuture.completeExceptionally( + new ServiceUnitNotReadyException("PendingAckHandle not replay error!")); + } else { + commitFuture.completeExceptionally( + new 
ServiceUnitNotReadyException("PendingAckHandle have been closed!")); + } + return; } } - } - + internalCommitTxn(txnID, properties, lowWaterMark, commitFuture); + }); + return commitFuture; + } - if (!acceptQueue.isEmpty() && !isInCacheRequest) { - synchronized (PendingAckHandleImpl.this) { - if (!acceptQueue.isEmpty()) { - CompletableFuture completableFuture = new CompletableFuture<>(); - addAbortTxnRequest(txnId, consumer, lowWaterMark, completableFuture); - return completableFuture; - } - } - } + private void addAbortTxnRequest(TxnID txnId, Consumer consumer, long lowWaterMark, + CompletableFuture completableFuture) { + acceptQueue.add(() -> internalAbortTxn(txnId, consumer, lowWaterMark, completableFuture)); + } - CompletableFuture abortFuture = new CompletableFuture<>(); + public CompletableFuture internalAbortTxn(TxnID txnId, Consumer consumer, + long lowWaterMark, CompletableFuture abortFuture) { if (this.cumulativeAckOfTransaction != null) { pendingAckStoreFuture.thenAccept(pendingAckStore -> pendingAckStore.appendAbortMark(txnId, AckType.Cumulative).thenAccept(v -> { @@ -610,29 +571,64 @@ public synchronized CompletableFuture abortTxn(TxnID txnId, Consumer consu return abortFuture; } + @Override + public CompletableFuture abortTxn(TxnID txnId, Consumer consumer, long lowWaterMark) { + CompletableFuture abortFuture = new CompletableFuture<>(); + internalPinnedExecutor.execute(() -> { + if (!checkIfReady()) { + switch (state) { + case Initializing: + addAbortTxnRequest(txnId, consumer, lowWaterMark, abortFuture); + return; + case None: + addAbortTxnRequest(txnId, consumer, lowWaterMark, abortFuture); + initPendingAckStore(); + return; + default: + if (state == State.Error) { + abortFuture.completeExceptionally( + new ServiceUnitNotReadyException("PendingAckHandle not replay error!")); + } else { + abortFuture.completeExceptionally( + new ServiceUnitNotReadyException("PendingAckHandle have been closed!")); + } + return; + } + } + internalAbortTxn(txnId, 
consumer, lowWaterMark, abortFuture); + }); + return abortFuture; + } + private void handleLowWaterMark(TxnID txnID, long lowWaterMark) { - if (individualAckOfTransaction != null && !individualAckOfTransaction.isEmpty()) { - TxnID firstTxn = individualAckOfTransaction.firstKey(); - - if (firstTxn.getMostSigBits() == txnID.getMostSigBits() - && firstTxn.getLeastSigBits() <= lowWaterMark) { - this.pendingAckStoreFuture.whenComplete((pendingAckStore, throwable) -> { - if (throwable == null) { - pendingAckStore.appendAbortMark(txnID, AckType.Individual).thenAccept(v -> { - synchronized (PendingAckHandleImpl.this) { - log.warn("[{}] Transaction pending ack handle low water mark success! txnId : [{}], " - + "lowWaterMark : [{}]", topicName, txnID, lowWaterMark); - individualAckOfTransaction.remove(firstTxn); - handleLowWaterMark(txnID, lowWaterMark); - } - }).exceptionally(e -> { - log.warn("[{}] Transaction pending ack handle low water mark fail! txnId : [{}], " - + "lowWaterMark : [{}]", topicName, txnID, lowWaterMark); - return null; - }); - } - }); + lowWaterMarks.compute(txnID.getMostSigBits(), (tcId, oldLowWaterMark) -> { + if (oldLowWaterMark == null || oldLowWaterMark < lowWaterMark) { + return lowWaterMark; + } else { + return oldLowWaterMark; } + }); + + if (handleLowWaterMark.tryAcquire()) { + if (individualAckOfTransaction != null && !individualAckOfTransaction.isEmpty()) { + TxnID firstTxn = individualAckOfTransaction.firstKey(); + long tCId = firstTxn.getMostSigBits(); + Long lowWaterMarkOfFirstTxnId = lowWaterMarks.get(tCId); + if (lowWaterMarkOfFirstTxnId != null && firstTxn.getLeastSigBits() <= lowWaterMarkOfFirstTxnId) { + abortTxn(firstTxn, null, lowWaterMarkOfFirstTxnId).thenRun(() -> { + log.warn("[{}] Transaction pending ack handle low water mark success! 
txnId : [{}], " + + "lowWaterMark : [{}]", topicName, firstTxn, lowWaterMarkOfFirstTxnId); + handleLowWaterMark.release(); + }).exceptionally(ex -> { + log.warn("[{}] Transaction pending ack handle low water mark fail! txnId : [{}], " + + "lowWaterMark : [{}]", topicName, firstTxn, lowWaterMarkOfFirstTxnId); + handleLowWaterMark.release(); + return null; + }); + return; + } + } + handleLowWaterMark.release(); } } @@ -692,13 +688,18 @@ private void individualAckAbortCommon(TxnID txnID, HashMap positionPair = positions.get(i); + positionPair.left = PositionImpl.get(positionPair.getLeft().getLedgerId(), + positionPair.getLeft().getEntryId(), + Arrays.copyOf(positionPair.left.getAckSet(), positionPair.left.getAckSet().length)); this.individualAckPositions.put(position, positions.get(i)); } else { MutablePair positionPair = @@ -889,6 +903,12 @@ public synchronized void completeHandleFuture() { } } + public synchronized void exceptionHandleFuture(Throwable t) { + if (!this.pendingAckHandleCompletableFuture.isDone()) { + this.pendingAckHandleCompletableFuture.completeExceptionally(t); + } + } + @Override public TransactionInPendingAckStats getTransactionInPendingAckStats(TxnID txnID) { TransactionInPendingAckStats transactionInPendingAckStats = new TransactionInPendingAckStats(); @@ -923,7 +943,7 @@ public CompletableFuture close() { } public CompletableFuture getStoreManageLedger() { - if (this.pendingAckStoreFuture.isDone()) { + if (this.pendingAckStoreFuture != null && this.pendingAckStoreFuture.isDone()) { return this.pendingAckStoreFuture.thenCompose(pendingAckStore -> { if (pendingAckStore instanceof MLPendingAckStore) { return ((MLPendingAckStore) pendingAckStore).getManagedLedger(); @@ -937,6 +957,11 @@ public CompletableFuture getStoreManageLedger() { } } + @Override + public boolean checkIfPendingAckStoreInit() { + return this.pendingAckStoreFuture != null && this.pendingAckStoreFuture.isDone(); + } + protected void handleCacheRequest() { while (true) { Runnable 
runnable = acceptQueue.poll(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/recover/TransactionRecoverTrackerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/recover/TransactionRecoverTrackerImpl.java index 3667e666313c0..05b61fd637019 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/recover/TransactionRecoverTrackerImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/recover/TransactionRecoverTrackerImpl.java @@ -123,9 +123,11 @@ public void appendOpenTransactionToTimeoutTracker() { @Override public void handleCommittingAndAbortingTransaction() { committingTransactions.forEach(k -> - transactionMetadataStoreService.endTransaction(new TxnID(tcId, k), TxnAction.COMMIT_VALUE, false)); + transactionMetadataStoreService.endTransaction(new TxnID(tcId, k), TxnAction.COMMIT_VALUE, + false)); abortingTransactions.forEach(k -> - transactionMetadataStoreService.endTransaction(new TxnID(tcId, k), TxnAction.ABORT_VALUE, false)); + transactionMetadataStoreService.endTransaction(new TxnID(tcId, k), TxnAction.ABORT_VALUE, + false)); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/util/LogIndexLagBackoff.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/util/LogIndexLagBackoff.java new file mode 100644 index 0000000000000..145381814ba61 --- /dev/null +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/util/LogIndexLagBackoff.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.transaction.util; + +import static com.google.common.base.Preconditions.checkArgument; +import lombok.Getter; + +public class LogIndexLagBackoff { + + @Getter + private final long minLag; + @Getter + private final long maxLag; + @Getter + private final double exponent; + + public LogIndexLagBackoff(long minLag, long maxLag, double exponent) { + checkArgument(minLag > 0, "min lag must be > 0"); + checkArgument(maxLag >= minLag, "maxLag should be >= minLag"); + checkArgument(exponent > 0, "exponent must be > 0"); + this.minLag = minLag; + this.maxLag = maxLag; + this.exponent = exponent; + } + + + public long next(int indexCount) { + if (indexCount <= 0) { + return minLag; + } + return (long) Math.min(this.maxLag, minLag * Math.pow(indexCount, exponent)); + } +} diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/util/package-info.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/util/package-info.java new file mode 100644 index 0000000000000..58cb1c24b19e6 --- /dev/null +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/util/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/** + * Implementation of a transaction tools. + */ +package org.apache.pulsar.broker.transaction.util; diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/PulsarWebResource.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/PulsarWebResource.java index d4955de8ba999..da9a3f060abd2 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/PulsarWebResource.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/PulsarWebResource.java @@ -38,6 +38,7 @@ import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; import javax.ws.rs.WebApplicationException; +import javax.ws.rs.container.AsyncResponse; import javax.ws.rs.core.Context; import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; @@ -244,6 +245,86 @@ protected void validateAdminAccessForTenant(String tenant) { } } + protected static CompletableFuture validateAdminAccessForTenantAsync( + PulsarService pulsar, String clientAppId, + String originalPrincipal, String tenant, + AuthenticationDataSource authenticationData) { + if (log.isDebugEnabled()) { + log.debug("check admin access on tenant: {} - Authenticated: {} -- role: {}", tenant, + (isClientAuthenticated(clientAppId)), clientAppId); + } + return pulsar.getPulsarResources().getTenantResources().getTenantAsync(tenant) + .thenCompose(tenantInfoOptional -> { + if (!tenantInfoOptional.isPresent()) { + throw new RestException(Status.NOT_FOUND, "Tenant does not exist"); + } + TenantInfo tenantInfo = tenantInfoOptional.get(); + if 
(pulsar.getConfiguration().isAuthenticationEnabled() && pulsar.getConfiguration() + .isAuthorizationEnabled()) { + if (!isClientAuthenticated(clientAppId)) { + throw new RestException(Status.FORBIDDEN, "Need to authenticate to perform the request"); + } + validateOriginalPrincipal(pulsar.getConfiguration().getProxyRoles(), clientAppId, + originalPrincipal); + if (pulsar.getConfiguration().getProxyRoles().contains(clientAppId)) { + AuthorizationService authorizationService = + pulsar.getBrokerService().getAuthorizationService(); + return authorizationService.isTenantAdmin(tenant, clientAppId, tenantInfo, + authenticationData) + .thenCompose(isTenantAdmin -> { + String debugMsg = "Successfully authorized {} (proxied by {}) on tenant {}"; + if (!isTenantAdmin) { + return authorizationService.isSuperUser(clientAppId, authenticationData) + .thenCombine(authorizationService.isSuperUser(originalPrincipal, + authenticationData), + (proxyAuthorized, originalPrincipalAuthorized) -> { + if (!proxyAuthorized || !originalPrincipalAuthorized) { + throw new RestException(Status.UNAUTHORIZED, + String.format( + "Proxy not authorized to access " + + "resource (proxy:%s,original:%s)" + , clientAppId, originalPrincipal)); + } else { + if (log.isDebugEnabled()) { + log.debug(debugMsg, originalPrincipal, + clientAppId, tenant); + } + return null; + } + }); + } else { + if (log.isDebugEnabled()) { + log.debug(debugMsg, originalPrincipal, clientAppId, tenant); + } + return CompletableFuture.completedFuture(null); + } + }); + } else { + return pulsar.getBrokerService() + .getAuthorizationService() + .isSuperUser(clientAppId, authenticationData) + .thenCompose(isSuperUser -> { + if (!isSuperUser) { + return pulsar.getBrokerService().getAuthorizationService() + .isTenantAdmin(tenant, clientAppId, tenantInfo, authenticationData); + } else { + return CompletableFuture.completedFuture(true); + } + }).thenAccept(authorized -> { + if (!authorized) { + throw new RestException(Status.UNAUTHORIZED, 
+ "Don't have permission to administrate resources on this tenant"); + } else { + log.debug("Successfully authorized {} on tenant {}", clientAppId, tenant); + } + }); + } + } else { + return CompletableFuture.completedFuture(null); + } + }); + } + protected static void validateAdminAccessForTenant(PulsarService pulsar, String clientAppId, String originalPrincipal, String tenant, AuthenticationDataSource authenticationData) @@ -709,6 +790,11 @@ public static CompletableFuture checkLocalOrGetPeerReplicationC if (!namespace.isGlobal()) { return CompletableFuture.completedFuture(null); } + NamespaceName heartbeatNamespace = pulsarService.getHeartbeatNamespaceV2(); + if (namespace.equals(heartbeatNamespace)) { + return CompletableFuture.completedFuture(null); + } + final CompletableFuture validationFuture = new CompletableFuture<>(); final String localCluster = pulsarService.getConfiguration().getClusterName(); @@ -716,7 +802,12 @@ public static CompletableFuture checkLocalOrGetPeerReplicationC .getPoliciesAsync(namespace).thenAccept(policiesResult -> { if (policiesResult.isPresent()) { Policies policies = policiesResult.get(); - if (policies.replication_clusters.isEmpty()) { + if (policies.deleted) { + String msg = String.format("Namespace %s is deleted", namespace.toString()); + log.warn(msg); + validationFuture.completeExceptionally(new RestException(Status.PRECONDITION_FAILED, + "Namespace is deleted")); + } else if (policies.replication_clusters.isEmpty()) { String msg = String.format( "Namespace does not have any clusters configured : local_cluster=%s ns=%s", localCluster, namespace.toString()); @@ -740,9 +831,9 @@ public static CompletableFuture checkLocalOrGetPeerReplicationC validationFuture.complete(null); } } else { - String msg = String.format("Policies not found for %s namespace", namespace.toString()); + String msg = String.format("Namespace %s not found", namespace.toString()); log.warn(msg); - validationFuture.completeExceptionally(new 
RestException(Status.NOT_FOUND, msg)); + validationFuture.completeExceptionally(new RestException(Status.NOT_FOUND, "Namespace not found")); } }).exceptionally(ex -> { String msg = String.format("Failed to validate global cluster configuration : cluster=%s ns=%s emsg=%s", @@ -784,18 +875,22 @@ private static ClusterDataImpl getOwnerFromPeerClusterList(PulsarService pulsar, return null; } - protected static void checkAuthorization(PulsarService pulsarService, TopicName topicName, String role, - AuthenticationDataSource authenticationData) throws Exception { + protected static CompletableFuture checkAuthorizationAsync(PulsarService pulsarService, + TopicName topicName, String role, + AuthenticationDataSource authenticationData) { if (!pulsarService.getConfiguration().isAuthorizationEnabled()) { // No enforcing of authorization policies - return; + return CompletableFuture.completedFuture(null); } // get zk policy manager - if (!pulsarService.getBrokerService().getAuthorizationService().allowTopicOperation(topicName, - TopicOperation.LOOKUP, null, role, authenticationData)) { - log.warn("[{}] Role {} is not allowed to lookup topic", topicName, role); - throw new RestException(Status.UNAUTHORIZED, "Don't have permission to connect to this namespace"); - } + return pulsarService.getBrokerService().getAuthorizationService().allowTopicOperationAsync(topicName, + TopicOperation.LOOKUP, null, role, authenticationData).thenAccept(allow -> { + if (!allow) { + log.warn("[{}] Role {} is not allowed to lookup topic", topicName, role); + throw new RestException(Status.UNAUTHORIZED, + "Don't have permission to connect to this namespace"); + } + }); } // Used for unit tests access @@ -851,6 +946,29 @@ && pulsar().getBrokerService().isAuthorizationEnabled()) { } } + public CompletableFuture validateNamespaceOperationAsync(NamespaceName namespaceName, + NamespaceOperation operation) { + if (pulsar().getConfiguration().isAuthenticationEnabled() + && 
pulsar().getBrokerService().isAuthorizationEnabled()) { + if (!isClientAuthenticated(clientAppId())) { + return FutureUtil.failedFuture( + new RestException(Status.FORBIDDEN, "Need to authenticate to perform the request")); + } + + return pulsar().getBrokerService().getAuthorizationService() + .allowNamespaceOperationAsync(namespaceName, operation, originalPrincipal(), + clientAppId(), clientAuthData()) + .thenAccept(isAuthorized -> { + if (!isAuthorized) { + throw new RestException(Status.FORBIDDEN, + String.format("Unauthorized to validateNamespaceOperation for" + + " operation [%s] on namespace [%s]", operation.toString(), namespaceName)); + } + }); + } + return CompletableFuture.completedFuture(null); + } + public void validateNamespacePolicyOperation(NamespaceName namespaceName, PolicyName policy, PolicyOperation operation) { @@ -1016,23 +1134,52 @@ public void validateTopicOperation(TopicName topicName, TopicOperation operation } public void validateTopicOperation(TopicName topicName, TopicOperation operation, String subscription) { + try { + validateTopicOperationAsync(topicName, operation, subscription).get(); + } catch (InterruptedException | ExecutionException e) { + Throwable cause = e.getCause(); + if (cause instanceof WebApplicationException){ + throw (WebApplicationException) cause; + } else { + throw new RestException(cause); + } + } + } + + public CompletableFuture validateTopicOperationAsync(TopicName topicName, TopicOperation operation) { + return validateTopicOperationAsync(topicName, operation, null); + } + + public CompletableFuture validateTopicOperationAsync(TopicName topicName, + TopicOperation operation, String subscription) { if (pulsar().getConfiguration().isAuthenticationEnabled() && pulsar().getBrokerService().isAuthorizationEnabled()) { if (!isClientAuthenticated(clientAppId())) { throw new RestException(Status.UNAUTHORIZED, "Need to authenticate to perform the request"); } - AuthenticationDataHttps authData = clientAuthData(); 
authData.setSubscription(subscription); - - Boolean isAuthorized = pulsar().getBrokerService().getAuthorizationService() - .allowTopicOperation(topicName, operation, originalPrincipal(), clientAppId(), authData); - - if (!isAuthorized) { - throw new RestException(Status.UNAUTHORIZED, String.format("Unauthorized to validateTopicOperation for" - + " operation [%s] on topic [%s]", operation.toString(), topicName)); - } + return pulsar().getBrokerService().getAuthorizationService() + .allowTopicOperationAsync(topicName, operation, originalPrincipal(), clientAppId(), authData) + .thenAccept(isAuthorized -> { + if (!isAuthorized) { + throw new RestException(Status.UNAUTHORIZED, String.format( + "Unauthorized to validateTopicOperation for operation [%s] on topic [%s]", + operation.toString(), topicName)); + } + }); + } else { + return CompletableFuture.completedFuture(null); } } + protected Void handleCommonRestAsyncException(AsyncResponse asyncResponse, Throwable ex) { + Throwable realCause = FutureUtil.unwrapCompletionException(ex); + if (realCause instanceof WebApplicationException) { + asyncResponse.resume(realCause); + } else { + asyncResponse.resume(new RestException(realCause)); + } + return null; + } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/RestException.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/RestException.java index 0cec819c46a53..dcdd69f90c2d1 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/RestException.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/RestException.java @@ -38,10 +38,9 @@ static String getExceptionData(Throwable t) { writer.append("\n --- An unexpected error occurred in the server ---\n\n"); if (t != null) { writer.append("Message: ").append(t.getMessage()).append("\n\n"); + writer.append("Stacktrace:\n\n"); + t.printStackTrace(new PrintWriter(writer)); } - writer.append("Stacktrace:\n\n"); - - t.printStackTrace(new PrintWriter(writer)); return 
writer.toString(); } @@ -50,7 +49,11 @@ public RestException(Response.Status status, String message) { } public RestException(int code, String message) { - super(message, Response.status(code).entity(new ErrorData(message)).type(MediaType.APPLICATION_JSON).build()); + super(message, Response + .status(code, message) + .entity(new ErrorData(message)) + .type(MediaType.APPLICATION_JSON) + .build()); } public RestException(Throwable t) { @@ -77,9 +80,9 @@ private static Response getResponse(Throwable t) { return e.getResponse(); } else { return Response - .status(Status.INTERNAL_SERVER_ERROR) - .entity(getExceptionData(t)) - .type(MediaType.TEXT_PLAIN) + .status(Status.INTERNAL_SERVER_ERROR.getStatusCode(), t.getMessage()) + .entity(new ErrorData(getExceptionData(t))) + .type(MediaType.APPLICATION_JSON) .build(); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/WebExecutorStats.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/WebExecutorStats.java new file mode 100644 index 0000000000000..45f3a1e562b26 --- /dev/null +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/WebExecutorStats.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.web; + +import io.prometheus.client.CollectorRegistry; +import io.prometheus.client.Gauge; +import java.util.concurrent.atomic.AtomicBoolean; + +public class WebExecutorStats implements AutoCloseable { + private static final AtomicBoolean CLOSED = new AtomicBoolean(false); + + private final Gauge maxThreads; + private final Gauge minThreads; + private final Gauge idleThreads; + private final Gauge activeThreads; + private final Gauge currentThreads; + private final WebExecutorThreadPool executor; + + private static volatile WebExecutorStats instance; + + static synchronized WebExecutorStats getStats(WebExecutorThreadPool executor) { + if (null == instance) { + instance = new WebExecutorStats(executor); + } + + return instance; + } + + private WebExecutorStats(WebExecutorThreadPool executor) { + this.executor = executor; + + this.maxThreads = Gauge.build("pulsar_web_executor_max_threads", "-").create() + .setChild(new Gauge.Child() { + public double get() { + return WebExecutorStats.this.executor.getMaxThreads(); + } + }) + .register(); + + this.minThreads = Gauge.build("pulsar_web_executor_min_threads", "-").create() + .setChild(new Gauge.Child() { + public double get() { + return WebExecutorStats.this.executor.getMinThreads(); + } + }) + .register(); + + this.idleThreads = Gauge.build("pulsar_web_executor_idle_threads", "-").create() + .setChild(new Gauge.Child() { + public double get() { + return WebExecutorStats.this.executor.getIdleThreads(); + } + }) + .register(); + + this.activeThreads = Gauge.build("pulsar_web_executor_active_threads", "-").create() + .setChild(new Gauge.Child() { + public double get() { + return WebExecutorStats.this.executor.getThreads() + - WebExecutorStats.this.executor.getIdleThreads(); + } + }) + .register(); + + this.currentThreads = Gauge.build("pulsar_web_executor_current_threads", "-").create() + .setChild(new Gauge.Child() { + public double get() { + return 
WebExecutorStats.this.executor.getThreads(); + } + }) + .register(); + } + + @Override + public void close() throws Exception { + if (CLOSED.compareAndSet(false, true)) { + CollectorRegistry.defaultRegistry.unregister(this.activeThreads); + CollectorRegistry.defaultRegistry.unregister(this.maxThreads); + CollectorRegistry.defaultRegistry.unregister(this.minThreads); + CollectorRegistry.defaultRegistry.unregister(this.idleThreads); + CollectorRegistry.defaultRegistry.unregister(this.currentThreads); + } + } +} diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/WebService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/WebService.java index e5e2ab1d21889..f2542745bfa8d 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/WebService.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/web/WebService.java @@ -30,8 +30,7 @@ import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; -import org.apache.pulsar.common.util.SecurityUtility; -import org.apache.pulsar.common.util.keystoretls.KeyStoreSSLContext; +import org.apache.pulsar.jetty.tls.JettySslContextFactory; import org.eclipse.jetty.server.Handler; import org.eclipse.jetty.server.Server; import org.eclipse.jetty.server.ServerConnector; @@ -66,6 +65,8 @@ public class WebService implements AutoCloseable { private final PulsarService pulsar; private final Server server; private final List handlers; + + private final WebExecutorStats executorStats; private final WebExecutorThreadPool webServiceExecutor; public final int maxConcurrentRequests; @@ -79,6 +80,7 @@ public WebService(PulsarService pulsar) throws PulsarServerException { this.webServiceExecutor = new WebExecutorThreadPool( pulsar.getConfiguration().getNumHttpServerThreads(), "pulsar-web"); + this.executorStats = WebExecutorStats.getStats(webServiceExecutor); this.server = new Server(webServiceExecutor); 
this.maxConcurrentRequests = pulsar.getConfiguration().getMaxConcurrentHttpRequests(); List connectors = new ArrayList<>(); @@ -99,8 +101,8 @@ public WebService(PulsarService pulsar) throws PulsarServerException { SslContextFactory sslCtxFactory; ServiceConfiguration config = pulsar.getConfiguration(); if (config.isTlsEnabledWithKeyStore()) { - sslCtxFactory = KeyStoreSSLContext.createSslContextFactory( - config.getTlsProvider(), + sslCtxFactory = JettySslContextFactory.createServerSslContextWithKeystore( + config.getWebServiceTlsProvider(), config.getTlsKeyStoreType(), config.getTlsKeyStore(), config.getTlsKeyStorePassword(), @@ -109,15 +111,20 @@ public WebService(PulsarService pulsar) throws PulsarServerException { config.getTlsTrustStore(), config.getTlsTrustStorePassword(), config.isTlsRequireTrustedClientCertOnConnect(), + config.getWebServiceTlsCiphers(), + config.getWebServiceTlsProtocols(), config.getTlsCertRefreshCheckDurationSec() ); } else { - sslCtxFactory = SecurityUtility.createSslContextFactory( + sslCtxFactory = JettySslContextFactory.createServerSslContext( + config.getWebServiceTlsProvider(), config.isTlsAllowInsecureConnection(), config.getTlsTrustCertsFilePath(), config.getTlsCertificateFilePath(), config.getTlsKeyFilePath(), - config.isTlsRequireTrustedClientCertOnConnect(), true, + config.isTlsRequireTrustedClientCertOnConnect(), + config.getWebServiceTlsCiphers(), + config.getWebServiceTlsProtocols(), config.getTlsCertRefreshCheckDurationSec()); } httpsConnector = new PulsarServerConnector(server, 1, 1, sslCtxFactory); @@ -271,6 +278,7 @@ public void close() throws PulsarServerException { jettyStatisticsCollector = null; } webServiceExecutor.join(); + this.executorStats.close(); log.info("Web service closed"); } catch (Exception e) { throw new PulsarServerException(e); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/client/api/RawReader.java b/pulsar-broker/src/main/java/org/apache/pulsar/client/api/RawReader.java index 
f74157a938914..fe068f2f9435f 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/client/api/RawReader.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/client/api/RawReader.java @@ -34,7 +34,7 @@ public interface RawReader { static CompletableFuture create(PulsarClient client, String topic, String subscription) { CompletableFuture> future = new CompletableFuture<>(); RawReader r = new RawReaderImpl((PulsarClientImpl) client, topic, subscription, future); - return future.thenCompose(x -> x.seekAsync(MessageId.earliest)).thenApply(__ -> r); + return future.thenApply(__ -> r); } /** diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawReaderImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawReaderImpl.java index 217dd5ccc8529..30944289dc3a8 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawReaderImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/client/impl/RawReaderImpl.java @@ -31,6 +31,7 @@ import org.apache.pulsar.client.api.RawMessage; import org.apache.pulsar.client.api.RawReader; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.impl.conf.ConsumerConfigurationData; import org.apache.pulsar.common.api.proto.CommandAck.AckType; @@ -56,6 +57,7 @@ public RawReaderImpl(PulsarClientImpl client, String topic, String subscription, consumerConfiguration.setSubscriptionType(SubscriptionType.Exclusive); consumerConfiguration.setReceiverQueueSize(DEFAULT_RECEIVER_QUEUE_SIZE); consumerConfiguration.setReadCompacted(true); + consumerConfiguration.setSubscriptionInitialPosition(SubscriptionInitialPosition.Earliest); consumer = new RawConsumerImpl(client, consumerConfiguration, consumerFuture); @@ -114,6 +116,7 @@ static class RawConsumerImpl extends ConsumerImpl { client.externalExecutorProvider(), 
TopicName.getPartitionIndex(conf.getSingleTopic()), false, + false, consumerFuture, MessageId.earliest, 0 /* startMessageRollbackDurationInSec */, @@ -151,6 +154,9 @@ void tryCompletePending() { messageAndCnx.msg.close(); closeAsync(); } + MessageIdData messageId = messageAndCnx.msg.getMessageIdData(); + lastDequeuedMessageId = new BatchMessageIdImpl(messageId.getLedgerId(), messageId.getEntryId(), + messageId.getPartition(), numMsg - 1); ClientCnx currentCnx = cnx(); if (currentCnx == messageAndCnx.cnx) { @@ -207,9 +213,10 @@ void messageReceived(MessageIdData messageId, int redeliveryCount, log.debug("[{}][{}] Received raw message: {}/{}/{}", topic, subscription, messageId.getEntryId(), messageId.getLedgerId(), messageId.getPartition()); } + incomingRawMessages.add( - new RawMessageAndCnx(new RawMessageImpl(messageId, headersAndPayload), cnx)); - tryCompletePending(); + new RawMessageAndCnx(new RawMessageImpl(messageId, headersAndPayload), cnx)); + internalPinnedExecutor.execute(this::tryCompletePending); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/common/naming/NamespaceBundle.java b/pulsar-broker/src/main/java/org/apache/pulsar/common/naming/NamespaceBundle.java index 1531095c32212..98dcb93e7d3db 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/common/naming/NamespaceBundle.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/common/naming/NamespaceBundle.java @@ -152,6 +152,18 @@ public static String getBundleRange(String namespaceBundle) { return namespaceBundle.substring(namespaceBundle.lastIndexOf('/') + 1); } + public static String getBundleNamespace(String namespaceBundle) { + int index = namespaceBundle.lastIndexOf('/'); + if (index != -1) { + try { + return NamespaceName.get(namespaceBundle.substring(0, index)).toString(); + } catch (Exception e) { + // return itself if meets invalid format + } + } + return namespaceBundle; + } + public NamespaceBundleFactory getNamespaceBundleFactory() { return factory; } diff --git 
a/pulsar-broker/src/main/java/org/apache/pulsar/common/naming/NamespaceBundleFactory.java b/pulsar-broker/src/main/java/org/apache/pulsar/common/naming/NamespaceBundleFactory.java index 9fbcf17a78b8c..586e3b39aec86 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/common/naming/NamespaceBundleFactory.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/common/naming/NamespaceBundleFactory.java @@ -31,6 +31,7 @@ import com.google.common.collect.Range; import com.google.common.hash.HashFunction; import java.io.IOException; +import java.time.Duration; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -46,6 +47,7 @@ import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.resources.LocalPoliciesResources; import org.apache.pulsar.broker.resources.PulsarResources; +import org.apache.pulsar.client.impl.Backoff; import org.apache.pulsar.common.policies.data.BundlesData; import org.apache.pulsar.common.policies.data.LocalPolicies; import org.apache.pulsar.common.policies.data.Policies; @@ -64,6 +66,7 @@ public class NamespaceBundleFactory { private final PulsarService pulsar; private final MetadataCache policiesCache; + private final Duration maxRetryDuration = Duration.ofSeconds(10); public NamespaceBundleFactory(PulsarService pulsar, HashFunction hashFunc) { this.hashFunc = hashFunc; @@ -90,22 +93,27 @@ private CompletableFuture loadBundles(NamespaceName namespace, } CompletableFuture future = new CompletableFuture<>(); + doLoadBundles(namespace, future, createBackoff(), System.nanoTime() + maxRetryDuration.toNanos()); + return future; + } + + private void doLoadBundles(NamespaceName namespace, CompletableFuture future, + Backoff backoff, long retryDeadline) { // Read the static bundle data from the policies pulsar.getPulsarResources().getLocalPolicies().getLocalPoliciesWithVersion(namespace).thenAccept(result -> { - if (result.isPresent()) { try { future.complete(readBundles(namespace, 
result.get().getValue(), result.get().getStat().getVersion())); } catch (IOException e) { - future.completeExceptionally(e); + handleLoadBundlesRetry(namespace, future, backoff, retryDeadline, e); } } else { // If no local policies defined for namespace, copy from global config copyToLocalPolicies(namespace) .thenAccept(b -> future.complete(b)) .exceptionally(ex -> { - future.completeExceptionally(ex); + handleLoadBundlesRetry(namespace, future, backoff, retryDeadline, ex); return null; }); } @@ -113,7 +121,23 @@ private CompletableFuture loadBundles(NamespaceName namespace, future.completeExceptionally(ex); return null; }); - return future; + } + + private void handleLoadBundlesRetry(NamespaceName namespace, + CompletableFuture future, + Backoff backoff, long retryDeadline, Throwable e) { + if (e instanceof Error || System.nanoTime() > retryDeadline) { + future.completeExceptionally(e); + } else { + LOG.warn("Error loading bundle for {}. Retrying exception", namespace, e); + long retryDelay = backoff.next(); + pulsar.getExecutor().schedule(() -> + doLoadBundles(namespace, future, backoff, retryDeadline), retryDelay, TimeUnit.MILLISECONDS); + } + } + + private static Backoff createBackoff() { + return new Backoff(100, TimeUnit.MILLISECONDS, 5, TimeUnit.SECONDS, 0, TimeUnit.MILLISECONDS); } private NamespaceBundles readBundles(NamespaceName namespace, LocalPolicies localPolicies, long version) diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopic.java index 7c969373dbeb4..9e50fc07152f8 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopic.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopic.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.compaction; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import org.apache.bookkeeper.mledger.AsyncCallbacks.ReadEntriesCallback; 
import org.apache.bookkeeper.mledger.Entry; @@ -26,11 +27,13 @@ import org.apache.pulsar.broker.service.Consumer; public interface CompactedTopic { - CompletableFuture newCompactedLedger(Position p, long compactedLedgerId); + CompletableFuture newCompactedLedger(Position p, long compactedLedgerId); + CompletableFuture deleteCompactedLedger(long compactedLedgerId); void asyncReadEntriesOrWait(ManagedCursor cursor, int numberOfEntriesToRead, boolean isFirstRead, ReadEntriesCallback callback, Consumer consumer); CompletableFuture readLastEntryOfCompactedLedger(); + Optional getCompactionHorizon(); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicImpl.java index 131341318e0f9..aac213fcadcd1 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicImpl.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactedTopicImpl.java @@ -64,7 +64,7 @@ public CompactedTopicImpl(BookKeeper bk) { } @Override - public CompletableFuture newCompactedLedger(Position p, long compactedLedgerId) { + public CompletableFuture newCompactedLedger(Position p, long compactedLedgerId) { synchronized (this) { compactionHorizon = (PositionImpl) p; @@ -72,15 +72,16 @@ public CompletableFuture newCompactedLedger(Position p, long compactedLedgerI compactedTopicContext = openCompactedLedger(bk, compactedLedgerId); // delete the ledger from the old context once the new one is open - if (previousContext != null) { - return compactedTopicContext.thenCompose((res) -> previousContext) - .thenCompose((res) -> tryDeleteCompactedLedger(bk, res.ledger.getId())); - } else { - return compactedTopicContext; - } + return compactedTopicContext.thenCompose(__ -> + previousContext != null ? 
previousContext : CompletableFuture.completedFuture(null)); } } + @Override + public CompletableFuture deleteCompactedLedger(long compactedLedgerId) { + return tryDeleteCompactedLedger(bk, compactedLedgerId); + } + @Override public void asyncReadEntriesOrWait(ManagedCursor cursor, int numberOfEntriesToRead, @@ -122,7 +123,12 @@ public void asyncReadEntriesOrWait(ManagedCursor cursor, return readEntries(context.ledger, startPoint, endPoint) .thenAccept((entries) -> { Entry lastEntry = entries.get(entries.size() - 1); - cursor.seek(lastEntry.getPosition().getNext()); + // The compaction task depends on the last snapshot and the incremental + // entries to build the new snapshot. So for the compaction cursor, we + // need to force seek the read position to ensure the compactor can read + // the complete last snapshot because of the compactor will read the data + // before the compaction cursor mark delete position + cursor.seek(lastEntry.getPosition().getNext(), true); callback.readEntriesComplete(entries, consumer); }); } @@ -286,11 +292,16 @@ public CompletableFuture readLastEntryOfCompactedLedger() { if (compactionHorizon == null) { return CompletableFuture.completedFuture(null); } - return compactedTopicContext.thenCompose(context -> - readEntries(context.ledger, context.ledger.getLastAddConfirmed(), context.ledger.getLastAddConfirmed()) - .thenCompose(entries -> entries.size() > 0 - ? CompletableFuture.completedFuture(entries.get(0)) - : CompletableFuture.completedFuture(null))); + return compactedTopicContext.thenCompose(context -> { + if (context.ledger.getLastAddConfirmed() == -1) { + return CompletableFuture.completedFuture(null); + } + return readEntries( + context.ledger, context.ledger.getLastAddConfirmed(), context.ledger.getLastAddConfirmed()) + .thenCompose(entries -> entries.size() > 0 + ? 
CompletableFuture.completedFuture(entries.get(0)) + : CompletableFuture.completedFuture(null)); + }); } private static int comparePositionAndMessageId(PositionImpl p, MessageIdData m) { @@ -298,6 +309,10 @@ private static int comparePositionAndMessageId(PositionImpl p, MessageIdData m) .compare(p.getLedgerId(), m.getLedgerId()) .compare(p.getEntryId(), m.getEntryId()).result(); } + + public synchronized Optional getCompactionHorizon() { + return Optional.ofNullable(this.compactionHorizon); + } private static final Logger log = LoggerFactory.getLogger(CompactedTopicImpl.class); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactorTool.java b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactorTool.java index 1140c49db87f1..35ca089e5df9b 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactorTool.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/compaction/CompactorTool.java @@ -37,6 +37,7 @@ import org.apache.pulsar.broker.ServiceConfigurationUtils; import org.apache.pulsar.client.api.ClientBuilder; import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.internal.PropertiesUtils; import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; import org.apache.pulsar.common.util.CmdGenerateDocs; import org.apache.pulsar.common.util.netty.EventLoopUtil; @@ -105,12 +106,17 @@ public static void main(String[] args) throws Exception { ClientBuilder clientBuilder = PulsarClient.builder(); + // Apply all arbitrary configuration. This must be called before setting any fields annotated as + // @Secret on the ClientConfigurationData object because of the way they are serialized. + // See https://github.com/apache/pulsar/issues/8509 for more information. 
+ clientBuilder.loadConf(PropertiesUtils.filterAndMapProperties(brokerConfig.getProperties(), "brokerClient_")); + if (isNotBlank(brokerConfig.getBrokerClientAuthenticationPlugin())) { clientBuilder.authentication(brokerConfig.getBrokerClientAuthenticationPlugin(), brokerConfig.getBrokerClientAuthenticationParameters()); } - AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(brokerConfig); + AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(brokerConfig, "pulsar+ssl"); if (internalListener.getBrokerServiceUrlTls() != null) { log.info("Found a TLS-based advertised listener in configuration file. \n" + "Will connect pulsar use TLS."); @@ -120,6 +126,7 @@ public static void main(String[] args) throws Exception { .tlsTrustCertsFilePath(brokerConfig.getTlsCertificateFilePath()); } else { + internalListener = ServiceConfigurationUtils.getInternalListener(brokerConfig, "pulsar"); clientBuilder.serviceUrl(internalListener.getBrokerServiceUrl().toString()); } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/utils/ConcurrentBitmapSortedLongPairSet.java b/pulsar-broker/src/main/java/org/apache/pulsar/utils/ConcurrentBitmapSortedLongPairSet.java new file mode 100644 index 0000000000000..c9f1c65daca37 --- /dev/null +++ b/pulsar-broker/src/main/java/org/apache/pulsar/utils/ConcurrentBitmapSortedLongPairSet.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.utils; + +import java.util.Iterator; +import java.util.Map; +import java.util.NavigableMap; +import java.util.NavigableSet; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.pulsar.common.util.collections.LongPairSet; +import org.roaringbitmap.RoaringBitmap; + +public class ConcurrentBitmapSortedLongPairSet { + + private final NavigableMap map = new TreeMap<>(); + private final ReadWriteLock lock = new ReentrantReadWriteLock(); + + public void add(long item1, long item2) { + lock.writeLock().lock(); + try { + RoaringBitmap bitSet = map.computeIfAbsent(item1, k -> new RoaringBitmap()); + bitSet.add(item2, item2 + 1); + } finally { + lock.writeLock().unlock(); + } + } + + public void remove(long item1, long item2) { + lock.writeLock().lock(); + try { + RoaringBitmap bitSet = map.get(item1); + if (bitSet != null) { + bitSet.remove(item2, item2 + 1); + if (bitSet.isEmpty()) { + map.remove(item1, bitSet); + } + } + } finally { + lock.writeLock().unlock(); + } + } + + public boolean contains(long item1, long item2) { + lock.readLock().lock(); + try { + RoaringBitmap bitSet = map.get(item1); + return bitSet != null && bitSet.contains(item2, item2 + 1); + } finally { + lock.readLock().unlock(); + } + } + + public void removeUpTo(long item1, long item2) { + lock.writeLock().lock(); + try { + Map.Entry firstEntry = map.firstEntry(); + while (firstEntry != null && 
firstEntry.getKey() <= item1) { + if (firstEntry.getKey() < item1) { + map.remove(firstEntry.getKey(), firstEntry.getValue()); + } else { + RoaringBitmap bitSet = firstEntry.getValue(); + if (bitSet != null) { + bitSet.remove(0, item2); + if (bitSet.isEmpty()) { + map.remove(firstEntry.getKey(), bitSet); + } + } + break; + } + firstEntry = map.firstEntry(); + } + } finally { + lock.writeLock().unlock(); + } + } + + + public Set items(int numberOfItems, LongPairSet.LongPairFunction longPairConverter) { + NavigableSet items = new TreeSet<>(); + lock.readLock().lock(); + try { + for (Map.Entry entry : map.entrySet()) { + Iterator iterator = entry.getValue().stream().iterator(); + while (iterator.hasNext() && items.size() < numberOfItems) { + items.add(longPairConverter.apply(entry.getKey(), iterator.next())); + } + if (items.size() == numberOfItems) { + break; + } + } + } finally { + lock.readLock().unlock(); + } + return items; + } + + public boolean isEmpty() { + lock.readLock().lock(); + try { + return map.isEmpty() || map.values().stream().allMatch(RoaringBitmap::isEmpty); + } finally { + lock.readLock().unlock(); + } + } + + public void clear() { + lock.writeLock().lock(); + try { + map.clear(); + } finally { + lock.writeLock().unlock(); + } + } + + public int size() { + lock.readLock().lock(); + try { + return map.isEmpty() ? 
0 : map.values().stream().mapToInt(RoaringBitmap::getCardinality).sum(); + } finally { + lock.readLock().unlock(); + } + } +} diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/utils/auth/tokens/TokensCliUtils.java b/pulsar-broker/src/main/java/org/apache/pulsar/utils/auth/tokens/TokensCliUtils.java index c089fa09953d4..47364bc68bdb4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/utils/auth/tokens/TokensCliUtils.java +++ b/pulsar-broker/src/main/java/org/apache/pulsar/utils/auth/tokens/TokensCliUtils.java @@ -22,6 +22,7 @@ import com.beust.jcommander.IUsageFormatter; import com.beust.jcommander.JCommander; import com.beust.jcommander.Parameter; +import com.beust.jcommander.ParameterException; import com.beust.jcommander.Parameters; import com.google.common.base.Charsets; import io.jsonwebtoken.Claims; @@ -155,8 +156,13 @@ public void run() throws Exception { Optional optExpiryTime = Optional.empty(); if (expiryTime != null) { - long relativeTimeMillis = TimeUnit.SECONDS - .toMillis(RelativeTimeUtil.parseRelativeTimeInSeconds(expiryTime)); + long relativeTimeMillis; + try { + relativeTimeMillis = TimeUnit.SECONDS.toMillis( + RelativeTimeUtil.parseRelativeTimeInSeconds(expiryTime)); + } catch (IllegalArgumentException exception) { + throw new ParameterException(exception.getMessage()); + } optExpiryTime = Optional.of(new Date(System.currentTimeMillis() + relativeTimeMillis)); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/BrokerMessageDeduplicationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/BrokerMessageDeduplicationTest.java new file mode 100644 index 0000000000000..f2d7a21a40f0b --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/BrokerMessageDeduplicationTest.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker; + +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; +import org.apache.bookkeeper.mledger.ManagedLedger; +import org.apache.pulsar.broker.service.Topic; +import org.apache.pulsar.broker.service.persistent.MessageDeduplication; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.testng.annotations.Test; + +public class BrokerMessageDeduplicationTest { + + @Test + public void markerMessageNotDeduplicated() { + PulsarService pulsarService = mock(PulsarService.class); + ServiceConfiguration configuration = new ServiceConfiguration(); + doReturn(configuration).when(pulsarService).getConfiguration(); + MessageDeduplication deduplication = spy(new MessageDeduplication(pulsarService, + mock(PersistentTopic.class), mock(ManagedLedger.class))); + doReturn(true).when(deduplication).isEnabled(); + Topic.PublishContext context = mock(Topic.PublishContext.class); + doReturn(true).when(context).isMarkerMessage(); + MessageDeduplication.MessageDupStatus status = deduplication.isDuplicate(context, null); + 
assertEquals(status, MessageDeduplication.MessageDupStatus.NotDup); + } + + @Test + public void markerMessageNotRecordPersistent() { + PulsarService pulsarService = mock(PulsarService.class); + ServiceConfiguration configuration = new ServiceConfiguration(); + doReturn(configuration).when(pulsarService).getConfiguration(); + MessageDeduplication deduplication = spy(new MessageDeduplication(pulsarService, + mock(PersistentTopic.class), mock(ManagedLedger.class))); + doReturn(true).when(deduplication).isEnabled(); + Topic.PublishContext context = mock(Topic.PublishContext.class); + // marker message don't record message persisted. + doReturn(true).when(context).isMarkerMessage(); + deduplication.recordMessagePersisted(context, null); + + // if is not a marker message, we will get NPE. because context is mocked with null value fields. + doReturn(false).when(context).isMarkerMessage(); + try { + deduplication.recordMessagePersisted(context, null); + fail(); + } catch (Exception npe) { + assertTrue(npe instanceof NullPointerException); + } + } + + +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/BrokerTestUtil.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/BrokerTestUtil.java index ff03e425ccbcd..224060c9d912e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/BrokerTestUtil.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/BrokerTestUtil.java @@ -19,6 +19,7 @@ package org.apache.pulsar.broker; import java.util.UUID; +import org.mockito.Mockito; /** * Holds util methods used in test. @@ -29,4 +30,18 @@ public static String newUniqueName(String prefix) { return prefix + "-" + UUID.randomUUID(); } + /** + * Creates a Mockito spy directly without an intermediate instance to spy. + * This is to address flaky test issue where a spy created with a given instance fails with + * {@link org.mockito.exceptions.misusing.WrongTypeOfReturnValue} exception. 
+ * + * @param classToSpy the class to spy + * @param args the constructor arguments to use when creating the spy instance + * @return a spy of the provided class created with given constructor arguments + */ + public static T spyWithClassAndConstructorArgs(Class classToSpy, Object... args) { + return Mockito.mock(classToSpy, Mockito.withSettings() + .useConstructor(args) + .defaultAnswer(Mockito.CALLS_REAL_METHODS)); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/MultiBrokerBaseTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/MultiBrokerBaseTest.java new file mode 100644 index 0000000000000..c00ae8cd0d39d --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/MultiBrokerBaseTest.java @@ -0,0 +1,155 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminBuilder; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.metadata.api.MetadataStoreException; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.apache.pulsar.metadata.impl.ZKMetadataStore; +import org.apache.zookeeper.MockZooKeeperSession; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; + +public abstract class MultiBrokerBaseTest extends MockedPulsarServiceBaseTest { + protected List additionalBrokers; + protected List additionalBrokerAdmins; + protected List additionalBrokerClients; + + protected int numberOfAdditionalBrokers() { + return 2; + } + + @BeforeClass(alwaysRun = true) + @Override + public final void setup() throws Exception { + super.internalSetup(); + additionalBrokersSetup(); + pulsarResourcesSetup(); + } + + protected void pulsarResourcesSetup() throws PulsarAdminException { + admin.tenants().createTenant("public", createDefaultTenantInfo()); + admin.namespaces() + .createNamespace("public/default", getPulsar().getConfiguration().getDefaultNumberOfNamespaceBundles()); + } + + protected void additionalBrokersSetup() throws Exception { + int numberOfAdditionalBrokers = numberOfAdditionalBrokers(); + additionalBrokers = new ArrayList<>(numberOfAdditionalBrokers); + additionalBrokerAdmins = new ArrayList<>(numberOfAdditionalBrokers); + additionalBrokerClients = new ArrayList<>(numberOfAdditionalBrokers); + for (int i = 0; i < numberOfAdditionalBrokers; i++) { + PulsarService pulsarService = createAdditionalBroker(i); + additionalBrokers.add(i, 
pulsarService); + PulsarAdminBuilder pulsarAdminBuilder = + PulsarAdmin.builder().serviceHttpUrl(pulsarService.getWebServiceAddress() != null + ? pulsarService.getWebServiceAddress() + : pulsarService.getWebServiceAddressTls()); + customizeNewPulsarAdminBuilder(pulsarAdminBuilder); + additionalBrokerAdmins.add(i, pulsarAdminBuilder.build()); + additionalBrokerClients.add(i, newPulsarClient(pulsarService.getBrokerServiceUrl(), 0)); + } + } + + protected ServiceConfiguration createConfForAdditionalBroker(int additionalBrokerIndex) { + return getDefaultConf(); + } + + protected PulsarService createAdditionalBroker(int additionalBrokerIndex) throws Exception { + return startBroker(createConfForAdditionalBroker(additionalBrokerIndex)); + } + + @Override + protected MetadataStoreExtended createLocalMetadataStore() throws MetadataStoreException { + // use MockZooKeeperSession to provide a unique session id for each instance + return new ZKMetadataStore(MockZooKeeperSession.newInstance(mockZooKeeper)); + } + + @Override + protected MetadataStoreExtended createConfigurationMetadataStore() throws MetadataStoreException { + // use MockZooKeeperSession to provide a unique session id for each instance + return new ZKMetadataStore(MockZooKeeperSession.newInstance(mockZooKeeperGlobal)); + } + + @AfterClass(alwaysRun = true) + @Override + public final void cleanup() throws Exception { + additionalBrokersCleanup(); + super.internalCleanup(); + } + + protected void additionalBrokersCleanup() { + if (additionalBrokerAdmins != null) { + for (PulsarAdmin additionalBrokerAdmin : additionalBrokerAdmins) { + additionalBrokerAdmin.close(); + } + additionalBrokerAdmins = null; + } + if (additionalBrokerClients != null) { + for (PulsarClient additionalBrokerClient : additionalBrokerClients) { + try { + additionalBrokerClient.shutdown(); + } catch (PulsarClientException e) { + // ignore + } + } + additionalBrokerClients = null; + } + if (additionalBrokers != null) { + for (PulsarService 
pulsarService : additionalBrokers) { + try { + pulsarService.getConfiguration().setBrokerShutdownTimeoutMs(0L); + pulsarService.close(); + } catch (PulsarServerException e) { + // ignore + } + } + additionalBrokers = null; + } + } + + public final List getAllBrokers() { + List brokers = new ArrayList<>(numberOfAdditionalBrokers() + 1); + brokers.add(getPulsar()); + brokers.addAll(additionalBrokers); + return Collections.unmodifiableList(brokers); + } + + public final List getAllAdmins() { + List admins = new ArrayList<>(numberOfAdditionalBrokers() + 1); + admins.add(admin); + admins.addAll(additionalBrokerAdmins); + return Collections.unmodifiableList(admins); + } + + public final List getAllClients() { + List clients = new ArrayList<>(numberOfAdditionalBrokers() + 1); + clients.add(pulsarClient); + clients.addAll(additionalBrokerClients); + return Collections.unmodifiableList(clients); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest2.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java similarity index 94% rename from pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest2.java rename to pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java index 33efa2bc08b1d..e0fac522f27db 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest2.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApi2Test.java @@ -36,7 +36,6 @@ import java.net.URL; import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -45,7 +44,6 @@ import java.util.Optional; import java.util.Set; import java.util.UUID; -import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.TimeUnit; import javax.ws.rs.core.Response.Status; import lombok.Cleanup; @@ -82,6 +80,7 @@ import 
org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.AutoFailoverPolicyData; import org.apache.pulsar.common.policies.data.AutoFailoverPolicyType; +import org.apache.pulsar.common.policies.data.BacklogQuota; import org.apache.pulsar.common.policies.data.BrokerNamespaceIsolationData; import org.apache.pulsar.common.policies.data.BrokerNamespaceIsolationDataImpl; import org.apache.pulsar.common.policies.data.BundlesData; @@ -97,6 +96,7 @@ import org.apache.pulsar.common.policies.data.SubscriptionStats; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicStats; +import org.apache.pulsar.common.policies.data.impl.BacklogQuotaImpl; import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -106,7 +106,7 @@ @Slf4j @Test(groups = "broker") -public class AdminApiTest2 extends MockedPulsarServiceBaseTest { +public class AdminApi2Test extends MockedPulsarServiceBaseTest { private MockedPulsarService mockPulsarSetup; @@ -1026,9 +1026,11 @@ public void brokerNamespaceIsolationPoliciesUpdateOnTime() throws Exception { parameters1.put("min_limit", "1"); parameters1.put("usage_threshold", "100"); + final List primaryList = new ArrayList<>(); + primaryList.add(brokerName + ".*"); NamespaceIsolationData nsPolicyData1 = NamespaceIsolationData.builder() .namespaces(Collections.singletonList(ns1Name)) - .primary(Collections.singletonList(brokerName + ".*")) + .primary(primaryList) .autoFailoverPolicy(AutoFailoverPolicyData.builder() .policyType(AutoFailoverPolicyType.min_available) .parameters(parameters1) @@ -1251,7 +1253,7 @@ public void testPreciseBacklog() throws PulsarClientException, PulsarAdminExcept assertEquals(topicStats.getSubscriptions().get(subName).getMsgBacklog(), 10); topicStats = admin.topics().getStats(topic, true, true); - assertEquals(topicStats.getSubscriptions().get(subName).getBacklogSize(), 43); + 
assertEquals(topicStats.getSubscriptions().get(subName).getBacklogSize(), 40); assertEquals(topicStats.getSubscriptions().get(subName).getMsgBacklog(), 1); consumer.acknowledge(message); @@ -1267,6 +1269,143 @@ public void testPreciseBacklog() throws PulsarClientException, PulsarAdminExcept assertEquals(topicStats.getSubscriptions().get(subName).getMsgBacklog(), 9); } + @Test + public void testDeleteTenant() throws Exception { + pulsar.getConfiguration().setForceDeleteNamespaceAllowed(false); + + String tenant = "test-tenant-1"; + assertFalse(admin.tenants().getTenants().contains(tenant)); + + // create tenant + admin.tenants().createTenant(tenant, + new TenantInfoImpl(Sets.newHashSet("role1", "role2"), Sets.newHashSet("test"))); + assertTrue(admin.tenants().getTenants().contains(tenant)); + + // create namespace + String namespace = tenant + "/test-ns-1"; + admin.namespaces().createNamespace(namespace, Sets.newHashSet("test")); + assertEquals(admin.namespaces().getNamespaces(tenant), Lists.newArrayList(namespace)); + + // create topic + String topic = namespace + "/test-topic-1"; + admin.topics().createPartitionedTopic(topic, 10); + assertFalse(admin.topics().getList(namespace).isEmpty()); + + try { + admin.namespaces().deleteNamespace(namespace, false); + fail("should have failed due to namespace not empty"); + } catch (PulsarAdminException e) { + // Expected: cannot delete non-empty tenant + } + + // delete topic + admin.topics().deletePartitionedTopic(topic); + assertTrue(admin.topics().getList(namespace).isEmpty()); + + // delete namespace + admin.namespaces().deleteNamespace(namespace, false); + assertFalse(admin.namespaces().getNamespaces(tenant).contains(namespace)); + assertTrue(admin.namespaces().getNamespaces(tenant).isEmpty()); + + // delete tenant + admin.tenants().deleteTenant(tenant); + assertFalse(admin.tenants().getTenants().contains(tenant)); + + final String managedLedgersPath = "/managed-ledgers/" + tenant; + final String bundleDataPath = 
"/loadbalance/bundle-data/" + tenant; + assertFalse(pulsar.getLocalMetadataStore().exists(managedLedgersPath).join()); + assertFalse(pulsar.getLocalMetadataStore().exists(bundleDataPath).join()); + } + + @Test + public void testDeleteNamespace() throws Exception { + pulsar.getConfiguration().setForceDeleteNamespaceAllowed(false); + + String tenant = "test-tenant"; + assertFalse(admin.tenants().getTenants().contains(tenant)); + + // create tenant + admin.tenants().createTenant(tenant, + new TenantInfoImpl(Sets.newHashSet("role1", "role2"), Sets.newHashSet("test"))); + assertTrue(admin.tenants().getTenants().contains(tenant)); + + // create namespace + String namespace = tenant + "/test-ns"; + admin.namespaces().createNamespace(namespace, Sets.newHashSet("test")); + assertEquals(admin.namespaces().getNamespaces(tenant), Lists.newArrayList(namespace)); + + // create topic + String topic = namespace + "/test-topic"; + admin.topics().createPartitionedTopic(topic, 10); + assertFalse(admin.topics().getList(namespace).isEmpty()); + + try { + admin.namespaces().deleteNamespace(namespace, false); + fail("should have failed due to namespace not empty"); + } catch (PulsarAdminException e) { + // Expected: cannot delete non-empty namespace + } + + // delete topic + admin.topics().deletePartitionedTopic(topic); + assertTrue(admin.topics().getList(namespace).isEmpty()); + + // delete namespace + admin.namespaces().deleteNamespace(namespace, false); + assertFalse(admin.namespaces().getNamespaces(tenant).contains(namespace)); + assertTrue(admin.namespaces().getNamespaces(tenant).isEmpty()); + + + final String managedLedgersPath = "/managed-ledgers/" + namespace; + assertFalse(pulsar.getLocalMetadataStore().exists(managedLedgersPath).join()); + + final String bundleDataPath = "/loadbalance/bundle-data/" + namespace; + assertFalse(pulsar.getLocalMetadataStore().exists(bundleDataPath).join()); + } + + @Test + public void testDeleteNamespaceWithTopicPolicies() throws Exception { + 
stopBroker(); + conf.setSystemTopicEnabled(true); + conf.setTopicLevelPoliciesEnabled(true); + setup(); + + String tenant = "test-tenant"; + assertFalse(admin.tenants().getTenants().contains(tenant)); + + // create tenant + admin.tenants().createTenant(tenant, + new TenantInfoImpl(Sets.newHashSet("role1", "role2"), Sets.newHashSet("test"))); + assertTrue(admin.tenants().getTenants().contains(tenant)); + + // create namespace2 + String namespace = tenant + "/test-ns2"; + admin.namespaces().createNamespace(namespace, Sets.newHashSet("test")); + // create topic + String topic = namespace + "/test-topic2"; + Producer producer = pulsarClient.newProducer().topic(topic).create(); + producer.send("test".getBytes(StandardCharsets.UTF_8)); + BacklogQuota backlogQuota = BacklogQuotaImpl + .builder() + .limitTime(1000) + .limitSize(1000) + .retentionPolicy(BacklogQuota.RetentionPolicy.producer_exception) + .build(); + admin.topicPolicies().setBacklogQuota(topic, backlogQuota); + Awaitility.await().untilAsserted(() -> { + Assert.assertEquals(admin.topicPolicies() + .getBacklogQuotaMap(topic) + .get(BacklogQuota.BacklogQuotaType.destination_storage), backlogQuota); + }); + producer.close(); + admin.topics().delete(topic); + admin.namespaces().deleteNamespace(namespace); + Awaitility.await().untilAsserted(() -> { + assertTrue(admin.namespaces().getNamespaces(tenant).isEmpty()); + }); + } + + @Test(timeOut = 30000) public void testBacklogNoDelayed() throws PulsarClientException, PulsarAdminException, InterruptedException { final String topic = "persistent://prop-xyz/ns1/precise-back-log-no-delayed-" + UUID.randomUUID().toString(); @@ -1362,7 +1501,7 @@ public void testPreciseBacklogForPartitionedTopic() throws PulsarClientException topicStats = admin.topics().getPartitionedStats(topic, false, true, true); assertEquals(topicStats.getSubscriptions().get(subName).getMsgBacklog(), 1); - assertEquals(topicStats.getSubscriptions().get(subName).getBacklogSize(), 43); + 
assertEquals(topicStats.getSubscriptions().get(subName).getBacklogSize(), 40); } @Test(timeOut = 30000) @@ -1401,7 +1540,7 @@ public void testBacklogNoDelayedForPartitionedTopic() throws PulsarClientExcepti TopicStats topicStats = admin.topics().getPartitionedStats(topic, false, true, true); assertEquals(topicStats.getSubscriptions().get(subName).getMsgBacklog(), 10); - assertEquals(topicStats.getSubscriptions().get(subName).getBacklogSize(), 470); + assertEquals(topicStats.getSubscriptions().get(subName).getBacklogSize(), 440); assertEquals(topicStats.getSubscriptions().get(subName).getMsgBacklogNoDelayed(), 5); for (int i = 0; i < 5; i++) { @@ -1411,7 +1550,7 @@ public void testBacklogNoDelayedForPartitionedTopic() throws PulsarClientExcepti Awaitility.await().untilAsserted(() -> { TopicStats topicStats2 = admin.topics().getPartitionedStats(topic, false, true, true); assertEquals(topicStats2.getSubscriptions().get(subName).getMsgBacklog(), 5); - assertEquals(topicStats2.getSubscriptions().get(subName).getBacklogSize(), 238); + assertEquals(topicStats2.getSubscriptions().get(subName).getBacklogSize(), 223); assertEquals(topicStats2.getSubscriptions().get(subName).getMsgBacklogNoDelayed(), 0); }); @@ -1477,60 +1616,6 @@ public void testForceDeleteNamespace() throws Exception { } } - @Test - public void testDistinguishTopicTypeWhenForceDeleteNamespace() throws Exception { - conf.setForceDeleteNamespaceAllowed(true); - final String ns = "prop-xyz/distinguish-topic-type-ns"; - final String exNs = "prop-xyz/ex-distinguish-topic-type-ns"; - admin.namespaces().createNamespace(ns, 2); - admin.namespaces().createNamespace(exNs, 2); - - final String p1 = "persistent://" + ns + "/p1"; - final String p5 = "persistent://" + ns + "/p5"; - final String np = "persistent://" + ns + "/np"; - - admin.topics().createPartitionedTopic(p1, 1); - admin.topics().createPartitionedTopic(p5, 5); - admin.topics().createNonPartitionedTopic(np); - - final String exNp = "persistent://" + exNs + 
"/np"; - admin.topics().createNonPartitionedTopic(exNp); - // insert an invalid topic name - pulsar.getLocalMetadataStore().put( - "/managed-ledgers/" + exNs + "/persistent/", "".getBytes(), Optional.empty()).join(); - - List topics = pulsar.getNamespaceService().getFullListOfTopics(NamespaceName.get(ns)).get(); - List exTopics = pulsar.getNamespaceService().getFullListOfTopics(NamespaceName.get(exNs)).get(); - - // ensure that the topic list contains all the topics - List allTopics = new ArrayList<>(Arrays.asList(np, TopicName.get(p1).getPartition(0).toString())); - for (int i = 0; i < 5; i++) { - allTopics.add(TopicName.get(p5).getPartition(i).toString()); - } - Assert.assertEquals(allTopics.stream().filter(t -> !topics.contains(t)).count(), 0); - Assert.assertTrue(exTopics.contains("persistent://" + exNs + "/")); - // partition num = p1 + p5 + np - Assert.assertEquals(topics.size(), 1 + 5 + 1); - Assert.assertEquals(exTopics.size(), 1 + 1); - - admin.namespaces().deleteNamespace(ns, true); - Arrays.asList(p1, p5, np).forEach(t -> { - try { - admin.schemas().getSchemaInfo(t); - } catch (PulsarAdminException e) { - // all the normal topics' schemas have been deleted - Assert.assertEquals(e.getStatusCode(), 404); - } - }); - - try { - admin.namespaces().deleteNamespace(exNs, true); - fail("Should fail due to invalid topic"); - } catch (Exception e) { - //ok - } - } - @Test public void testUpdateClusterWithProxyUrl() throws Exception { ClusterData cluster = ClusterData.builder().serviceUrl(pulsar.getWebServiceAddress()).build(); @@ -1622,11 +1707,7 @@ public void testMaxTopicsPerNamespace() throws Exception { for (int i = 0; i < 5; ++i) { admin.topics().createPartitionedTopic(topic + i, 1); } - admin.topics().createPartitionedTopic("persistent://testTenant/ns1/__change_events", 2); - admin.topics().createPartitionedTopic("persistent://testTenant/ns1/__transaction_buffer_snapshot", 2); - admin.topics().createPartitionedTopic( - 
"persistent://testTenant/ns1/__transaction_buffer_snapshot-multiTopicsReader" - + "-05c0ded5e9__transaction_pending_ack", 2); + admin.topics().createPartitionedTopic("persistent://testTenant/ns1/__change_events", 6); // check first create system topics, then normal topic, unlimited even setMaxTopicsPerNamespace @@ -1636,11 +1717,7 @@ public void testMaxTopicsPerNamespace() throws Exception { admin.clusters().createCluster("test", ClusterData.builder().serviceUrl(brokerUrl.toString()).build()); admin.tenants().createTenant("testTenant", tenantInfo); admin.namespaces().createNamespace("testTenant/ns1", Sets.newHashSet("test")); - admin.topics().createPartitionedTopic("persistent://testTenant/ns1/__change_events", 2); - admin.topics().createPartitionedTopic("persistent://testTenant/ns1/__transaction_buffer_snapshot", 2); - admin.topics().createPartitionedTopic( - "persistent://testTenant/ns1/__transaction_buffer_snapshot-multiTopicsReader" - + "-05c0ded5e9__transaction_pending_ack", 2); + admin.topics().createPartitionedTopic("persistent://testTenant/ns1/__change_events", 6); for (int i = 0; i < 5; ++i) { admin.topics().createPartitionedTopic(topic + i, 1); } @@ -1811,6 +1888,7 @@ public void testMaxSubPerTopicApi() throws Exception { @Test(timeOut = 30000) public void testMaxSubPerTopic() throws Exception { + pulsar.getConfiguration().setMaxSubscriptionsPerTopic(0); final String myNamespace = "prop-xyz/ns" + UUID.randomUUID(); admin.namespaces().createNamespace(myNamespace, Sets.newHashSet("test")); final String topic = "persistent://" + myNamespace + "/testMaxSubPerTopic"; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiClusterTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiClusterTest.java new file mode 100644 index 0000000000000..83ef9af7a2e8f --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiClusterTest.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.admin; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; + +import com.google.common.collect.Sets; +import java.util.UUID; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.FailureDomain; +import org.awaitility.Awaitility; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@Test(groups = "broker") +@Slf4j +public class AdminApiClusterTest extends MockedPulsarServiceBaseTest { + private final String CLUSTER = "test"; + + @BeforeMethod + @Override + public void setup() throws Exception { + resetConfig(); + super.internalSetup(); + admin.clusters() + .createCluster(CLUSTER, ClusterData.builder().serviceUrl(pulsar.getWebServiceAddress()).build()); + } + + @AfterMethod(alwaysRun = true) + @Override + public void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testDeleteNonExistCluster() { + String 
cluster = "test-non-exist-cluster-" + UUID.randomUUID(); + + assertThrows(PulsarAdminException.NotFoundException.class, () -> admin.clusters().deleteCluster(cluster)); + } + + @Test + public void testDeleteExistCluster() throws PulsarAdminException { + String cluster = "test-exist-cluster-" + UUID.randomUUID(); + + admin.clusters() + .createCluster(cluster, ClusterData.builder().serviceUrl(pulsar.getWebServiceAddress()).build()); + Awaitility.await().untilAsserted(() -> assertNotNull(admin.clusters().getCluster(cluster))); + + admin.clusters().deleteCluster(cluster); + } + + @Test + public void testDeleteNonExistentFailureDomain() { + assertThrows(PulsarAdminException.NotFoundException.class, + () -> admin.clusters().deleteFailureDomain(CLUSTER, "non-existent-failure-domain")); + } + + @Test + public void testDeleteNonExistentFailureDomainInNonExistCluster() { + assertThrows(PulsarAdminException.PreconditionFailedException.class, + () -> admin.clusters().deleteFailureDomain(CLUSTER + UUID.randomUUID(), + "non-existent-failure-domain")); + } + + @Test + public void testDeleteExistFailureDomain() throws PulsarAdminException { + String domainName = CLUSTER + "-failure-domain"; + FailureDomain domain = FailureDomain.builder() + .brokers(Sets.newHashSet("b1", "b2", "b3")) + .build(); + admin.clusters().createFailureDomain(CLUSTER, domainName, domain); + Awaitility.await().untilAsserted(() -> admin.clusters().getFailureDomain(CLUSTER, domainName)); + + admin.clusters().deleteFailureDomain(CLUSTER, domainName); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiDynamicConfigurationsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiDynamicConfigurationsTest.java new file mode 100644 index 0000000000000..d3e4b2a4bd829 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiDynamicConfigurationsTest.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) 
under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.admin; + +import static org.junit.Assert.fail; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import java.util.Map; +import javax.ws.rs.core.Response; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class AdminApiDynamicConfigurationsTest extends MockedPulsarServiceBaseTest { + @BeforeMethod + @Override + public void setup() throws Exception { + super.internalSetup(); + } + + @AfterMethod(alwaysRun = true) + @Override + public void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void TestGetAllDynamicConfigurations() throws Exception { + Map configs = admin.brokers().getAllDynamicConfigurations(); + assertNotNull(configs); + } + + @Test + public void TestDeleteDynamicConfiguration() throws Exception { + admin.brokers().deleteDynamicConfiguration("dispatcherMinReadBatchSize"); + } + + @Test + public void 
TestDeleteInvalidDynamicConfiguration() { + try { + admin.brokers().deleteDynamicConfiguration("errorName"); + fail("exception should be thrown"); + } catch (Exception e) { + if (e instanceof PulsarAdminException) { + assertEquals(((PulsarAdminException) e).getStatusCode(), Response.Status.PRECONDITION_FAILED.getStatusCode()); + } else { + fail("PulsarAdminException should be thrown"); + } + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiGetLastMessageIdTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiGetLastMessageIdTest.java index cba0686383157..ea0b5c2a155c4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiGetLastMessageIdTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiGetLastMessageIdTest.java @@ -81,7 +81,7 @@ protected void setup() throws Exception { new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet("test"))); admin.namespaces().createNamespace("prop/ns-abc"); admin.namespaces().setNamespaceReplicationClusters("prop/ns-abc", Sets.newHashSet("test")); - persistentTopics = spy(new PersistentTopics()); + persistentTopics = spy(PersistentTopics.class); persistentTopics.setServletContext(new MockServletContext()); persistentTopics.setPulsar(pulsar); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiHealthCheckTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiHealthCheckTest.java index e0c887f3e3dfc..b4b97cad340a5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiHealthCheckTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiHealthCheckTest.java @@ -25,11 +25,15 @@ import org.apache.pulsar.common.naming.TopicVersion; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.compaction.Compactor; +import 
org.awaitility.Awaitility; +import org.junit.Assert; +import org.springframework.util.CollectionUtils; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; - -import java.net.URL; +import java.util.concurrent.CompletableFuture; +import java.util.stream.Collectors; @Test(groups = "broker") @Slf4j @@ -58,26 +62,100 @@ public void cleanup() throws Exception { @Test public void testHealthCheckup() throws Exception { - admin.brokers().healthcheck(); + final int times = 30; + CompletableFuture future = new CompletableFuture<>(); + pulsar.getExecutor().execute(() -> { + try { + for (int i = 0; i < times; i++) { + admin.brokers().healthcheck(); + } + future.complete(null); + }catch (PulsarAdminException e) { + future.completeExceptionally(e); + } + }); + for (int i = 0; i < times; i++) { + admin.brokers().healthcheck(); + } + // To ensure we don't have any subscription + final String testHealthCheckTopic = String.format("persistent://pulsar/test/localhost:%s/healthcheck", + pulsar.getConfig().getWebServicePort().get()); + Awaitility.await().untilAsserted(() -> { + Assert.assertFalse(future.isCompletedExceptionally()); + }); + Awaitility.await().untilAsserted(() -> + Assert.assertTrue(CollectionUtils.isEmpty(admin.topics() + .getSubscriptions(testHealthCheckTopic).stream() + // All system topics are using compaction, even though is not explicitly set in the policies. 
+ .filter(v -> !v.equals(Compactor.COMPACTION_SUBSCRIPTION)) + .collect(Collectors.toList()) + )) + ); } @Test public void testHealthCheckupV1() throws Exception { - admin.brokers().healthcheck(TopicVersion.V1); - } - - @Test(expectedExceptions = PulsarAdminException.class) - public void testHealthCheckupV2Error() throws Exception { - admin.brokers().healthcheck(TopicVersion.V2); + final int times = 30; + CompletableFuture future = new CompletableFuture<>(); + pulsar.getExecutor().execute(() -> { + try { + for (int i = 0; i < times; i++) { + admin.brokers().healthcheck(TopicVersion.V1); + } + future.complete(null); + }catch (PulsarAdminException e) { + future.completeExceptionally(e); + } + }); + for (int i = 0; i < times; i++) { + admin.brokers().healthcheck(TopicVersion.V1); + } + final String testHealthCheckTopic = String.format("persistent://pulsar/test/localhost:%s/healthcheck", + pulsar.getConfig().getWebServicePort().get()); + Awaitility.await().untilAsserted(() -> { + Assert.assertFalse(future.isCompletedExceptionally()); + }); + // To ensure we don't have any subscription + Awaitility.await().untilAsserted(() -> + Assert.assertTrue(CollectionUtils.isEmpty(admin.topics() + .getSubscriptions(testHealthCheckTopic).stream() + // All system topics are using compaction, even though is not explicitly set in the policies. 
+ .filter(v -> !v.equals(Compactor.COMPACTION_SUBSCRIPTION)) + .collect(Collectors.toList()) + )) + ); } @Test public void testHealthCheckupV2() throws Exception { - final URL pulsarWebAddress = new URL(pulsar.getWebServiceAddress()); - final String targetNameSpace = "pulsar/" + - pulsarWebAddress.getHost() + ":" + pulsarWebAddress.getPort(); - log.info("Target namespace for broker admin healthcheck V2 endpoint is {}", targetNameSpace); - admin.namespaces().createNamespace(targetNameSpace); - admin.brokers().healthcheck(TopicVersion.V2); + final int times = 30; + CompletableFuture future = new CompletableFuture<>(); + pulsar.getExecutor().execute(() -> { + try { + for (int i = 0; i < times; i++) { + admin.brokers().healthcheck(TopicVersion.V2); + } + future.complete(null); + }catch (PulsarAdminException e) { + future.completeExceptionally(e); + } + }); + for (int i = 0; i < times; i++) { + admin.brokers().healthcheck(TopicVersion.V2); + } + final String testHealthCheckTopic = String.format("persistent://pulsar/localhost:%s/healthcheck", + pulsar.getConfig().getWebServicePort().get()); + Awaitility.await().untilAsserted(() -> { + Assert.assertFalse(future.isCompletedExceptionally()); + }); + // To ensure we don't have any subscription + Awaitility.await().untilAsserted(() -> + Assert.assertTrue(CollectionUtils.isEmpty(admin.topics() + .getSubscriptions(testHealthCheckTopic).stream() + // All system topics are using compaction, even though is not explicitly set in the policies. 
+ .filter(v -> !v.equals(Compactor.COMPACTION_SUBSCRIPTION)) + .collect(Collectors.toList()) + )) + ); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaAutoUpdateTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaAutoUpdateTest.java index 891e7ac533d2c..3b3a8c9826a97 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaAutoUpdateTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaAutoUpdateTest.java @@ -32,8 +32,6 @@ import org.apache.pulsar.common.policies.data.SchemaAutoUpdateCompatibilityStrategy; import org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; import org.apache.pulsar.common.policies.data.TenantInfoImpl; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -42,9 +40,6 @@ @Slf4j @Test(groups = "broker") public class AdminApiSchemaAutoUpdateTest extends MockedPulsarServiceBaseTest { - - private static final Logger LOG = LoggerFactory.getLogger(AdminApiSchemaAutoUpdateTest.class); - @BeforeMethod @Override public void setup() throws Exception { @@ -67,8 +62,8 @@ public void cleanup() throws Exception { } private void testAutoUpdateBackward(String namespace, String topicName) throws Exception { - Assert.assertEquals(admin.namespaces().getSchemaAutoUpdateCompatibilityStrategy(namespace), - SchemaAutoUpdateCompatibilityStrategy.Full); + Assert.assertNull(admin.namespaces().getSchemaAutoUpdateCompatibilityStrategy(namespace)); + admin.namespaces().setSchemaAutoUpdateCompatibilityStrategy(namespace, SchemaAutoUpdateCompatibilityStrategy.Backward); @@ -91,8 +86,8 @@ private void testAutoUpdateBackward(String namespace, String topicName) throws E } private void testAutoUpdateForward(String namespace, String topicName) throws Exception { - 
Assert.assertEquals(admin.namespaces().getSchemaAutoUpdateCompatibilityStrategy(namespace), - SchemaAutoUpdateCompatibilityStrategy.Full); + Assert.assertNull(admin.namespaces().getSchemaAutoUpdateCompatibilityStrategy(namespace)); + admin.namespaces().setSchemaAutoUpdateCompatibilityStrategy(namespace, SchemaAutoUpdateCompatibilityStrategy.Forward); @@ -114,8 +109,7 @@ private void testAutoUpdateForward(String namespace, String topicName) throws Ex } private void testAutoUpdateFull(String namespace, String topicName) throws Exception { - Assert.assertEquals(admin.namespaces().getSchemaAutoUpdateCompatibilityStrategy(namespace), - SchemaAutoUpdateCompatibilityStrategy.Full); + Assert.assertNull(admin.namespaces().getSchemaAutoUpdateCompatibilityStrategy(namespace)); try (Producer p = pulsarClient.newProducer(Schema.AVRO(V1Data.class)).topic(topicName).create()) { p.send(new V1Data("test1", 1)); @@ -142,8 +136,8 @@ private void testAutoUpdateFull(String namespace, String topicName) throws Excep } private void testAutoUpdateDisabled(String namespace, String topicName) throws Exception { - Assert.assertEquals(admin.namespaces().getSchemaAutoUpdateCompatibilityStrategy(namespace), - SchemaAutoUpdateCompatibilityStrategy.Full); + Assert.assertNull(admin.namespaces().getSchemaAutoUpdateCompatibilityStrategy(namespace)); + admin.namespaces().setSchemaAutoUpdateCompatibilityStrategy(namespace, SchemaAutoUpdateCompatibilityStrategy.AutoUpdateDisabled); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaTest.java index dce60e0d38b1f..b4654afb6a811 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaTest.java @@ -20,9 +20,11 @@ import static java.lang.String.format; import static java.nio.charset.StandardCharsets.US_ASCII; +import static 
org.junit.Assert.assertNull; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.doReturn; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import com.google.common.collect.Sets; @@ -40,14 +42,17 @@ import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.schema.SchemaDefinition; +import org.apache.pulsar.client.impl.schema.SchemaInfoImpl; import org.apache.pulsar.client.impl.schema.StringSchema; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.ClusterDataImpl; import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; import org.apache.pulsar.common.policies.data.SchemaAutoUpdateCompatibilityStrategy; +import org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.schema.SchemaInfo; import org.apache.pulsar.common.schema.SchemaInfoWithVersion; +import org.awaitility.Awaitility; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.DataProvider; @@ -61,6 +66,8 @@ public class AdminApiSchemaTest extends MockedPulsarServiceBaseTest { final String cluster = "test"; + private final String schemaCompatibilityNamespace = "schematest/test-schema-compatibility-ns"; + @BeforeMethod @Override public void setup() throws Exception { @@ -72,6 +79,7 @@ public void setup() throws Exception { admin.tenants().createTenant("schematest", tenantInfo); admin.namespaces().createNamespace("schematest/test", Sets.newHashSet("test")); admin.namespaces().createNamespace("schematest/"+cluster+"/test", Sets.newHashSet("test")); + admin.namespaces().createNamespace(schemaCompatibilityNamespace, Sets.newHashSet("test")); } 
@AfterMethod(alwaysRun = true) @@ -163,11 +171,13 @@ private void testSchemaInfoApi(Schema schema, SchemaInfo readSi = admin.schemas().getSchemaInfo(topicName); log.info("Read schema of topic {} : {}", topicName, readSi); + ((SchemaInfoImpl)readSi).setTimestamp(0); assertEquals(readSi, si); readSi = admin.schemas().getSchemaInfo(topicName + "-partition-0"); log.info("Read schema of topic {} : {}", topicName, readSi); + ((SchemaInfoImpl)readSi).setTimestamp(0); assertEquals(readSi, si); } @@ -216,12 +226,14 @@ private void testSchemaInfoWithVersionApi(Schema schema, SchemaInfoWithVersion readSi = admin.schemas().getSchemaInfoWithVersion(topicName); log.info("Read schema of topic {} : {}", topicName, readSi); + ((SchemaInfoImpl)readSi.getSchemaInfo()).setTimestamp(0); assertEquals(readSi.getSchemaInfo(), si); assertEquals(readSi.getVersion(), 0); readSi = admin.schemas().getSchemaInfoWithVersion(topicName + "-partition-0"); log.info("Read schema of topic {} : {}", topicName, readSi); + ((SchemaInfoImpl)readSi.getSchemaInfo()).setTimestamp(0); assertEquals(readSi.getSchemaInfo(), si); assertEquals(readSi.getVersion(), 0); @@ -233,11 +245,19 @@ public void createKeyValueSchema(ApiVersion version) throws Exception { "test"); String topicName = "persistent://"+namespace + "/test-key-value-schema"; Schema keyValueSchema = Schema.KeyValue(Schema.AVRO(Foo.class), Schema.AVRO(Foo.class)); - admin.schemas().createSchema(topicName, - keyValueSchema.getSchemaInfo()); + admin.schemas().createSchema(topicName, keyValueSchema.getSchemaInfo()); SchemaInfo schemaInfo = admin.schemas().getSchemaInfo(topicName); + long timestamp = schemaInfo.getTimestamp(); + assertNotEquals(keyValueSchema.getSchemaInfo().getTimestamp(), timestamp); + assertNotEquals(0, timestamp); + + ((SchemaInfoImpl)keyValueSchema.getSchemaInfo()).setTimestamp(schemaInfo.getTimestamp()); assertEquals(keyValueSchema.getSchemaInfo(), schemaInfo); + + admin.schemas().createSchema(topicName, 
keyValueSchema.getSchemaInfo()); + SchemaInfo schemaInfo2 = admin.schemas().getSchemaInfo(topicName); + assertEquals(timestamp, schemaInfo2.getTimestamp()); } @Test @@ -349,4 +369,51 @@ public long getCToken() { assertEquals(ledgerInfo.entries, entryId + 1); assertEquals(ledgerInfo.size, length); } + + @Test + public void testGetSchemaCompatibilityStrategy() throws PulsarAdminException { + assertEquals(admin.namespaces().getSchemaCompatibilityStrategy(schemaCompatibilityNamespace), + SchemaCompatibilityStrategy.UNDEFINED); + } + + @Test + public void testGetSchemaAutoUpdateCompatibilityStrategy() throws PulsarAdminException { + assertNull(admin.namespaces().getSchemaAutoUpdateCompatibilityStrategy(schemaCompatibilityNamespace)); + } + + @Test + public void testGetSchemaCompatibilityStrategyWhenSetSchemaAutoUpdateCompatibilityStrategy() + throws PulsarAdminException { + assertEquals(admin.namespaces().getSchemaCompatibilityStrategy(schemaCompatibilityNamespace), + SchemaCompatibilityStrategy.UNDEFINED); + + admin.namespaces().setSchemaAutoUpdateCompatibilityStrategy(schemaCompatibilityNamespace, + SchemaAutoUpdateCompatibilityStrategy.Forward); + Awaitility.await().untilAsserted(() -> assertEquals(SchemaAutoUpdateCompatibilityStrategy.Forward, + admin.namespaces().getSchemaAutoUpdateCompatibilityStrategy(schemaCompatibilityNamespace) + )); + + assertEquals(admin.namespaces().getSchemaCompatibilityStrategy(schemaCompatibilityNamespace), + SchemaCompatibilityStrategy.UNDEFINED); + + admin.namespaces().setSchemaCompatibilityStrategy(schemaCompatibilityNamespace, + SchemaCompatibilityStrategy.BACKWARD); + Awaitility.await().untilAsserted(() -> assertEquals(SchemaCompatibilityStrategy.BACKWARD, + admin.namespaces().getSchemaCompatibilityStrategy(schemaCompatibilityNamespace))); + } + + @Test + public void testGetSchemaCompatibilityStrategyWhenSetBrokerLevelAndSchemaAutoUpdateCompatibilityStrategy() + throws PulsarAdminException { + 
pulsar.getConfiguration().setSchemaCompatibilityStrategy(SchemaCompatibilityStrategy.FORWARD); + + assertEquals(admin.namespaces().getSchemaCompatibilityStrategy(schemaCompatibilityNamespace), + SchemaCompatibilityStrategy.UNDEFINED); + + admin.namespaces().setSchemaAutoUpdateCompatibilityStrategy(schemaCompatibilityNamespace, + SchemaAutoUpdateCompatibilityStrategy.AlwaysCompatible); + Awaitility.await().untilAsserted(() -> assertEquals( + admin.namespaces().getSchemaCompatibilityStrategy(schemaCompatibilityNamespace), + SchemaCompatibilityStrategy.UNDEFINED)); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaValidationEnforced.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaValidationEnforced.java index 3daf920c975bc..75d77dc3da875 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaValidationEnforced.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaValidationEnforced.java @@ -30,9 +30,7 @@ import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; -import org.apache.pulsar.client.impl.schema.SchemaInfoImpl; import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.ClusterDataImpl; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.protocol.schema.PostSchemaPayload; import org.apache.pulsar.common.schema.SchemaInfo; @@ -68,6 +66,15 @@ public void cleanup() throws Exception { super.internalCleanup(); } + @Test + public void testGetSchemaValidationEnforcedApplied() throws Exception { + String namespace = "schema-validation-enforced/testApplied"; + admin.namespaces().createNamespace(namespace); + this.conf.setSchemaValidationEnforced(true); + assertTrue(admin.namespaces().getSchemaValidationEnforced(namespace, true)); + 
assertFalse(admin.namespaces().getSchemaValidationEnforced(namespace, false)); + } + @Test public void testDisableSchemaValidationEnforcedNoSchema() throws Exception { admin.namespaces().createNamespace("schema-validation-enforced/default-no-schema"); @@ -98,7 +105,7 @@ public void testDisableSchemaValidationEnforcedHasSchema() throws Exception { assertTrue(e.getMessage().contains("HTTP 404 Not Found")); } Map properties = Maps.newHashMap(); - SchemaInfo schemaInfo = SchemaInfoImpl.builder() + SchemaInfo schemaInfo = SchemaInfo.builder() .type(SchemaType.STRING) .properties(properties) .name("test") @@ -147,7 +154,7 @@ public void testEnableSchemaValidationEnforcedHasSchemaMismatch() throws Excepti } Map properties = Maps.newHashMap(); properties.put("key1", "value1"); - SchemaInfo schemaInfo = SchemaInfoImpl.builder() + SchemaInfo schemaInfo = SchemaInfo.builder() .type(SchemaType.STRING) .properties(properties) .name("test") @@ -177,7 +184,7 @@ public void testEnableSchemaValidationEnforcedHasSchemaMatch() throws Exception } admin.namespaces().setSchemaValidationEnforced(namespace,true); Map properties = Maps.newHashMap(); - SchemaInfo schemaInfo = SchemaInfoImpl.builder() + SchemaInfo schemaInfo = SchemaInfo.builder() .type(SchemaType.STRING) .properties(properties) .name("test") diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaWithAuthTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaWithAuthTest.java new file mode 100644 index 0000000000000..20fa07979e07f --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSchemaWithAuthTest.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.admin; + +import static org.testng.Assert.assertThrows; +import static org.testng.AssertJUnit.assertEquals; +import static org.testng.AssertJUnit.assertTrue; +import com.google.common.collect.Sets; +import io.jsonwebtoken.Jwts; +import io.jsonwebtoken.SignatureAlgorithm; +import java.util.Base64; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import javax.crypto.SecretKey; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; +import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminBuilder; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; +import org.apache.pulsar.client.impl.schema.SchemaInfoImpl; +import org.apache.pulsar.common.policies.data.AuthAction; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.schema.SchemaInfo; +import org.mockito.Mockito; +import org.testng.annotations.AfterMethod; +import 
org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Unit tests for schema admin api. + */ +@Slf4j +@Test(groups = "broker-admin") +public class AdminApiSchemaWithAuthTest extends MockedPulsarServiceBaseTest { + + private static final SecretKey SECRET_KEY = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + private static final String ADMIN_TOKEN = Jwts.builder().setSubject("admin").signWith(SECRET_KEY).compact(); + private static final String CONSUME_TOKEN = Jwts.builder().setSubject("consumer").signWith(SECRET_KEY).compact(); + + private static final String PRODUCE_TOKEN = Jwts.builder().setSubject("producer").signWith(SECRET_KEY).compact(); + + @BeforeMethod + @Override + public void setup() throws Exception { + conf.setAuthorizationEnabled(true); + conf.setAuthenticationEnabled(true); + conf.getProperties().setProperty("tokenSecretKey", "data:;base64," + + Base64.getEncoder().encodeToString(SECRET_KEY.getEncoded())); + Set providers = new HashSet<>(); + providers.add(AuthenticationProviderToken.class.getName()); + Set superUserRoles = new HashSet<>(); + superUserRoles.add("admin"); + conf.setSuperUserRoles(superUserRoles); + conf.setAuthenticationProviders(providers); + conf.setSystemTopicEnabled(false); + conf.setTopicLevelPoliciesEnabled(false); + super.internalSetup(); + + PulsarAdminBuilder pulsarAdminBuilder = PulsarAdmin.builder().serviceHttpUrl(brokerUrl != null + ? 
brokerUrl.toString() : brokerUrlTls.toString()) + .authentication(AuthenticationToken.class.getName(), + ADMIN_TOKEN); + admin = Mockito.spy(pulsarAdminBuilder.build()); + + // Setup namespaces + admin.clusters().createCluster("test", ClusterData.builder().serviceUrl(pulsar.getWebServiceAddress()).build()); + TenantInfoImpl tenantInfo = new TenantInfoImpl(Sets.newHashSet("role1", "role2"), Sets.newHashSet("test")); + admin.tenants().createTenant("schematest", tenantInfo); + admin.namespaces().createNamespace("schematest/test", Sets.newHashSet("test")); + } + + @AfterMethod(alwaysRun = true) + @Override + public void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testGetCreateDeleteSchema() throws Exception { + String topicName = "persistent://schematest/test/testCreateSchema"; + PulsarAdmin adminWithoutPermission = PulsarAdmin.builder() + .serviceHttpUrl(brokerUrl != null ? brokerUrl.toString() : brokerUrlTls.toString()) + .build(); + PulsarAdmin adminWithAdminPermission = PulsarAdmin.builder() + .serviceHttpUrl(brokerUrl != null ? brokerUrl.toString() : brokerUrlTls.toString()) + .authentication(AuthenticationToken.class.getName(), ADMIN_TOKEN) + .build(); + PulsarAdmin adminWithConsumePermission = PulsarAdmin.builder() + .serviceHttpUrl(brokerUrl != null ? brokerUrl.toString() : brokerUrlTls.toString()) + .authentication(AuthenticationToken.class.getName(), CONSUME_TOKEN) + .build(); + + PulsarAdmin adminWithProducePermission = PulsarAdmin.builder() + .serviceHttpUrl(brokerUrl != null ? 
brokerUrl.toString() : brokerUrlTls.toString()) + .authentication(AuthenticationToken.class.getName(), PRODUCE_TOKEN) + .build(); + admin.topics().grantPermission(topicName, "consumer", EnumSet.of(AuthAction.consume)); + admin.topics().grantPermission(topicName, "producer", EnumSet.of(AuthAction.produce)); + + SchemaInfo si = Schema.BOOL.getSchemaInfo(); + assertThrows(PulsarAdminException.class, () -> adminWithConsumePermission.schemas().getSchemaInfo(topicName)); + assertThrows(PulsarAdminException.class, () -> adminWithoutPermission.schemas().createSchema(topicName, si)); + adminWithProducePermission.schemas().createSchema(topicName, si); + adminWithAdminPermission.schemas().createSchema(topicName, si); + + assertThrows(PulsarAdminException.class, () -> adminWithoutPermission.schemas().getSchemaInfo(topicName)); + SchemaInfo readSi = adminWithConsumePermission.schemas().getSchemaInfo(topicName); + ((SchemaInfoImpl) readSi).setTimestamp(0); + assertEquals(readSi, si); + + assertThrows(PulsarAdminException.class, () -> adminWithoutPermission.schemas().getSchemaInfo(topicName, 0)); + readSi = adminWithConsumePermission.schemas().getSchemaInfo(topicName, 0); + ((SchemaInfoImpl) readSi).setTimestamp(0); + assertEquals(readSi, si); + List allSchemas = adminWithConsumePermission.schemas().getAllSchemas(topicName); + assertEquals(allSchemas.size(), 1); + + SchemaInfo schemaInfo2 = Schema.BOOL.getSchemaInfo(); + assertThrows(PulsarAdminException.class, () -> + adminWithoutPermission.schemas().testCompatibility(topicName, schemaInfo2)); + assertTrue(adminWithAdminPermission.schemas().testCompatibility(topicName, schemaInfo2).isCompatibility()); + + assertThrows(PulsarAdminException.class, () -> + adminWithoutPermission.schemas().getVersionBySchema(topicName, si)); + Long versionBySchema = adminWithConsumePermission.schemas().getVersionBySchema(topicName, si); + assertEquals(versionBySchema, Long.valueOf(0L)); + + assertThrows(PulsarAdminException.class, () -> 
adminWithoutPermission.schemas().deleteSchema(topicName)); + adminWithAdminPermission.schemas().deleteSchema(topicName); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSubscriptionTest.java new file mode 100644 index 0000000000000..6f38ccd8b5cf5 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiSubscriptionTest.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.admin; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.expectThrows; +import javax.ws.rs.core.Response; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.MessageId; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker-admin") +public class AdminApiSubscriptionTest extends MockedPulsarServiceBaseTest { + @BeforeMethod + @Override + public void setup() throws Exception { + super.internalSetup(); + super.setupDefaultTenantAndNamespace(); + } + + @AfterMethod(alwaysRun = true) + @Override + public void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testExpireNonExistTopic() throws Exception { + String topic = "test-expire-messages-topic"; + String subscriptionName = "test-expire-messages-sub"; + admin.topics().createSubscription(topic, subscriptionName, MessageId.latest); + assertEquals(expectThrows(PulsarAdminException.class, + () -> admin.topics().expireMessages(topic, subscriptionName, 1)).getStatusCode(), + Response.Status.CONFLICT.getStatusCode()); + assertEquals(expectThrows(PulsarAdminException.class, + () -> admin.topics().expireMessagesForAllSubscriptions(topic, 1)).getStatusCode(), + Response.Status.CONFLICT.getStatusCode()); + } + + @Test + public void TestExpireNonExistTopicAndNonExistSub() { + String topic = "test-expire-messages-topic"; + String subscriptionName = "test-expire-messages-sub"; + assertEquals(expectThrows(PulsarAdminException.class, + () -> admin.topics().expireMessages(topic, subscriptionName, 1)).getStatusCode(), + Response.Status.NOT_FOUND.getStatusCode()); + assertEquals(expectThrows(PulsarAdminException.class, + () -> admin.topics().expireMessagesForAllSubscriptions(topic, 
1)).getStatusCode(), + Response.Status.NOT_FOUND.getStatusCode()); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java index 7e1c576170636..348f5cb916d6d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminApiTest.java @@ -670,6 +670,14 @@ public void properties() throws PulsarAdminException { assertEquals(admin.tenants().getTenantInfo("prop-xyz"), newTenantAdmin); + try { + admin.tenants().deleteTenant("prop-xyz"); + fail("should have failed"); + } catch (PulsarAdminException e) { + assertTrue(e instanceof ConflictException); + assertEquals(e.getStatusCode(), 409); + assertEquals(e.getMessage(), "The tenant still has active namespaces"); + } admin.namespaces().deleteNamespace("prop-xyz/ns1"); admin.tenants().deleteTenant("prop-xyz"); assertEquals(admin.tenants().getTenants(), Lists.newArrayList()); @@ -728,6 +736,7 @@ public void namespaces() throws Exception { policies.bundles = PoliciesUtil.defaultBundle(); policies.auth_policies.getNamespaceAuthentication().put("spiffe://developer/passport-role", EnumSet.allOf(AuthAction.class)); policies.auth_policies.getNamespaceAuthentication().put("my-role", EnumSet.allOf(AuthAction.class)); + policies.is_allow_auto_update_schema = conf.isAllowAutoUpdateSchemaEnabled(); assertEquals(admin.namespaces().getPolicies("prop-xyz/ns1"), policies); assertEquals(admin.namespaces().getPermissions("prop-xyz/ns1"), policies.auth_policies.getNamespaceAuthentication()); @@ -738,6 +747,7 @@ public void namespaces() throws Exception { admin.namespaces().revokePermissionsOnNamespace("prop-xyz/ns1", "my-role"); policies.auth_policies.getNamespaceAuthentication().remove("spiffe://developer/passport-role"); policies.auth_policies.getNamespaceAuthentication().remove("my-role"); + policies.is_allow_auto_update_schema 
= conf.isAllowAutoUpdateSchemaEnabled(); assertEquals(admin.namespaces().getPolicies("prop-xyz/ns1"), policies); assertEquals(admin.namespaces().getPersistence("prop-xyz/ns1"), null); @@ -782,9 +792,9 @@ public void namespaces() throws Exception { } // Force topic creation and namespace being loaded - producer = pulsarClient.newProducer(Schema.BYTES).topic("persistent://prop-xyz/use/ns2/my-topic").create(); + producer = pulsarClient.newProducer(Schema.BYTES).topic("persistent://prop-xyz/ns2/my-topic").create(); producer.close(); - admin.topics().delete("persistent://prop-xyz/use/ns2/my-topic"); + admin.topics().delete("persistent://prop-xyz/ns2/my-topic"); // both unload and delete should succeed for ns2 on other broker with a redirect // otheradmin.namespaces().unload("prop-xyz/use/ns2"); @@ -1490,10 +1500,11 @@ public void testNamespaceSplitBundleConcurrent() throws Exception { fail("split bundle shouldn't have thrown exception"); } + Awaitility.await().untilAsserted(() -> + assertEquals(bundleFactory.getBundles(NamespaceName.get(namespace)).getBundles().size(), 4)); String[] splitRange4 = { namespace + "/0x00000000_0x3fffffff", namespace + "/0x3fffffff_0x7fffffff", namespace + "/0x7fffffff_0xbfffffff", namespace + "/0xbfffffff_0xffffffff" }; bundles = bundleFactory.getBundles(NamespaceName.get(namespace)); - assertEquals(bundles.getBundles().size(), 4); for (int i = 0; i < bundles.getBundles().size(); i++) { assertEquals(bundles.getBundles().get(i).toString(), splitRange4[i]); } @@ -1523,13 +1534,13 @@ public void testNamespaceSplitBundleConcurrent() throws Exception { } catch (Exception e) { fail("split bundle shouldn't have thrown exception"); } - + Awaitility.await().untilAsserted(() -> + assertEquals(bundleFactory.getBundles(NamespaceName.get(namespace)).getBundles().size(), 8)); String[] splitRange8 = { namespace + "/0x00000000_0x1fffffff", namespace + "/0x1fffffff_0x3fffffff", namespace + "/0x3fffffff_0x5fffffff", namespace + "/0x5fffffff_0x7fffffff", 
namespace + "/0x7fffffff_0x9fffffff", namespace + "/0x9fffffff_0xbfffffff", namespace + "/0xbfffffff_0xdfffffff", namespace + "/0xdfffffff_0xffffffff" }; bundles = bundleFactory.getBundles(NamespaceName.get(namespace)); - assertEquals(bundles.getBundles().size(), 8); for (int i = 0; i < bundles.getBundles().size(); i++) { assertEquals(bundles.getBundles().get(i).toString(), splitRange8[i]); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java index 63ade238aad56..1e825d340c001 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/AdminTest.java @@ -43,8 +43,10 @@ import java.util.Collections; import java.util.Date; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -67,8 +69,10 @@ import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.authentication.AuthenticationDataHttps; import org.apache.pulsar.broker.loadbalance.LeaderBroker; +import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.web.PulsarWebResource; import org.apache.pulsar.broker.web.RestException; +import org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespace; import org.apache.pulsar.common.conf.InternalConfigurationData; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; @@ -79,6 +83,7 @@ import org.apache.pulsar.common.policies.data.BundlesData; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.ClusterDataImpl; +import org.apache.pulsar.common.policies.data.ErrorData; import 
org.apache.pulsar.common.policies.data.NamespaceIsolationDataImpl; import org.apache.pulsar.common.policies.data.Policies; import org.apache.pulsar.common.policies.data.ResourceQuota; @@ -119,23 +124,29 @@ public AdminTest() { super(); } + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + conf.setMaxTenants(10); + } + @Override @BeforeMethod public void setup() throws Exception { conf.setClusterName(configClusterName); super.internalSetup(); - clusters = spy(new Clusters()); + clusters = spy(Clusters.class); clusters.setPulsar(pulsar); doReturn("test").when(clusters).clientAppId(); doNothing().when(clusters).validateSuperUserAccess(); - properties = spy(new Properties()); + properties = spy(Properties.class); properties.setPulsar(pulsar); doReturn("test").when(properties).clientAppId(); doNothing().when(properties).validateSuperUserAccess(); - namespaces = spy(new Namespaces()); + namespaces = spy(Namespaces.class); namespaces.setServletContext(new MockServletContext()); namespaces.setPulsar(pulsar); doReturn("test").when(namespaces).clientAppId(); @@ -144,7 +155,7 @@ public void setup() throws Exception { doNothing().when(namespaces).validateAdminAccessForTenant("other-tenant"); doNothing().when(namespaces).validateAdminAccessForTenant("new-property"); - brokers = spy(new Brokers()); + brokers = spy(Brokers.class); brokers.setPulsar(pulsar); doReturn("test").when(brokers).clientAppId(); doNothing().when(brokers).validateSuperUserAccess(); @@ -152,7 +163,7 @@ public void setup() throws Exception { uriField = PulsarWebResource.class.getDeclaredField("uri"); uriField.setAccessible(true); - persistentTopics = spy(new PersistentTopics()); + persistentTopics = spy(PersistentTopics.class); persistentTopics.setServletContext(new MockServletContext()); persistentTopics.setPulsar(pulsar); doReturn("test").when(persistentTopics).clientAppId(); @@ -162,11 +173,11 @@ public void setup() throws Exception { 
doNothing().when(persistentTopics).validateAdminAccessForTenant("other-tenant"); doNothing().when(persistentTopics).validateAdminAccessForTenant("prop-xyz"); - resourceQuotas = spy(new ResourceQuotas()); + resourceQuotas = spy(ResourceQuotas.class); resourceQuotas.setServletContext(new MockServletContext()); resourceQuotas.setPulsar(pulsar); - brokerStats = spy(new BrokerStats()); + brokerStats = spy(BrokerStats.class); brokerStats.setServletContext(new MockServletContext()); brokerStats.setPulsar(pulsar); @@ -175,7 +186,7 @@ public void setup() throws Exception { doReturn("test").when(persistentTopics).clientAppId(); doReturn(mock(AuthenticationDataHttps.class)).when(persistentTopics).clientAuthData(); - schemasResource = spy(new SchemasResource(mockClock)); + schemasResource = spy(SchemasResource.class); schemasResource.setServletContext(new MockServletContext()); schemasResource.setPulsar(pulsar); } @@ -622,6 +633,33 @@ public void properties() throws Throwable { assertEquals(e.getResponse().getStatus(), Status.PRECONDITION_FAILED.getStatusCode()); } + // Check max tenant count + int maxTenants = pulsar.getConfiguration().getMaxTenants(); + List tenants = pulsar.getPulsarResources().getTenantResources().listTenants(); + + for(int tenantSize = tenants.size();tenantSize < maxTenants; tenantSize++ ){ + final int tenantIndex = tenantSize; + Response obj = (Response)asynRequests(ctx -> + properties.createTenant(ctx, "test-tenant-" + tenantIndex, tenantInfo)); + Assert.assertTrue(obj.getStatus() < 400 && obj.getStatus() >= 200); + } + try { + response = asynRequests(ctx -> + properties.createTenant(ctx, "test-tenant-" + maxTenants, tenantInfo)); + fail("should have failed"); + } catch (RestException e) { + assertEquals(e.getResponse().getStatus(), Status.PRECONDITION_FAILED.getStatusCode()); + } + + // Check creating existing property when tenant reach max count. 
+ try { + response = asynRequests(ctx -> + properties.createTenant(ctx, "test-tenant-" + (maxTenants-1), tenantInfo)); + fail("should have failed"); + } catch (RestException e) { + assertEquals(e.getResponse().getStatus(), Status.CONFLICT.getStatusCode()); + } + AsyncResponse response2 = mock(AsyncResponse.class); namespaces.deleteNamespace(response2, "my-tenant", "use", "my-namespace", false, false); ArgumentCaptor captor = ArgumentCaptor.forClass(Response.class); @@ -830,6 +868,25 @@ public void testUpdatePartitionedTopicCoontainedInOldTopic() throws Exception { false, 10); } + @Test + public void test500Error() throws Exception { + final String property = "prop-xyz"; + final String cluster = "use"; + final String namespace = "ns"; + final String partitionedTopicName = "error-500-topic"; + AsyncResponse response1 = mock(AsyncResponse.class); + ArgumentCaptor responseCaptor = ArgumentCaptor.forClass(RestException.class); + NamespaceName namespaceName = NamespaceName.get(property, cluster, namespace); + NamespaceService ns = pulsar.getNamespaceService(); + CompletableFuture> future = new CompletableFuture(); + future.completeExceptionally(new RuntimeException("500 error contains error message")); + doReturn(future).when(ns).getListOfTopics(namespaceName, CommandGetTopicsOfNamespace.Mode.ALL); + persistentTopics.createPartitionedTopic(response1, property, cluster, namespace, partitionedTopicName, 5, false); + verify(response1, timeout(5000).times(1)).resume(responseCaptor.capture()); + Assert.assertEquals(responseCaptor.getValue().getResponse().getStatus(), Status.INTERNAL_SERVER_ERROR.getStatusCode()); + Assert.assertTrue(((ErrorData)responseCaptor.getValue().getResponse().getEntity()).reason.contains("500 error contains error message")); + } + static class TestAsyncResponse implements AsyncResponse { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/BookiesApiTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/BookiesApiTest.java index 644d47df7678d..a3bd52b090236 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/BookiesApiTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/BookiesApiTest.java @@ -18,13 +18,14 @@ */ package org.apache.pulsar.broker.admin; +import static org.mockito.Mockito.doReturn; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import java.util.Optional; import lombok.extern.slf4j.Slf4j; -import org.apache.bookkeeper.client.PulsarMockBookKeeper; +import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.common.policies.data.BookieInfo; @@ -123,6 +124,61 @@ public void testBasic() throws Exception { .get() .getValue() .size()); + + // test invalid rack name + // use rack aware placement policy + String errorMsg = "Bookie 'rack' parameter is invalid, When `RackawareEnsemblePlacementPolicy` is enabled, " + + "the rack name is not allowed to contain slash (`/`) except for the beginning and end of the rack name " + + "string. 
When `RegionawareEnsemblePlacementPolicy` is enabled, the rack name can only contain " + + "one slash (`/`) except for the beginning and end of the rack name string."; + + BookieInfo newInfo3 = BookieInfo.builder() + .rack("/rack/a") + .hostname("127.0.0.2") + .build(); + try { + admin.bookies().updateBookieRackInfo(bookie0, "default", newInfo3); + fail(); + } catch (PulsarAdminException e) { + assertEquals(412, e.getStatusCode()); + assertEquals(errorMsg, e.getMessage()); + } + + BookieInfo newInfo4 = BookieInfo.builder() + .rack("/rack") + .hostname("127.0.0.2") + .build(); + try { + admin.bookies().updateBookieRackInfo(bookie0, "default", newInfo4); + } catch (PulsarAdminException e) { + fail(); + } + + // enable region aware placement policy + ServiceConfiguration configuration = new ServiceConfiguration(); + configuration.setBookkeeperClientRegionawarePolicyEnabled(true); + doReturn(configuration).when(pulsar).getConfiguration(); + BookieInfo newInfo5 = BookieInfo.builder() + .rack("/region/rack/a") + .hostname("127.0.0.2") + .build(); + try { + admin.bookies().updateBookieRackInfo(bookie0, "default", newInfo5); + fail(); + } catch (PulsarAdminException e) { + assertEquals(412, e.getStatusCode()); + assertEquals(errorMsg, e.getMessage()); + } + + BookieInfo newInfo6 = BookieInfo.builder() + .rack("/region/rack/") + .hostname("127.0.0.2") + .build(); + try { + admin.bookies().updateBookieRackInfo(bookie0, "default", newInfo6); + } catch (PulsarAdminException e) { + fail(); + } } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/CreateSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/CreateSubscriptionTest.java index e0d1720dc1f9a..09f2c91cc2ab6 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/CreateSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/CreateSubscriptionTest.java @@ -19,15 +19,22 @@ package org.apache.pulsar.broker.admin; import 
static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import com.google.common.collect.Lists; import javax.ws.rs.ClientErrorException; import javax.ws.rs.core.Response.Status; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.admin.PulsarAdminException.ConflictException; +import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.common.naming.TopicName; +import org.awaitility.Awaitility; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -127,4 +134,19 @@ public void createSubscriptionOnPartitionedTopicWithPartialFailure() throws Exce Lists.newArrayList("sub-1")); } } + + @Test + public void testWaitingCurosrCausedMemoryLeak() throws Exception { + String topic = "persistent://my-property/my-ns/my-topic"; + for (int i = 0; i < 10; i ++) { + Consumer consumer = pulsarClient.newConsumer(Schema.STRING).topic(topic) + .subscriptionType(SubscriptionType.Failover).subscriptionName("test" + i).subscribe(); + Awaitility.await().untilAsserted(() -> assertTrue(consumer.isConnected())); + consumer.close(); + } + PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topic).get(); + ManagedLedgerImpl ml = (ManagedLedgerImpl)(topicRef.getManagedLedger()); + assertEquals(ml.getWaitingCursorsCount(), 0); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java index 9a366a2cc7c1a..636cbe5980540 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/NamespacesTest.java @@ -155,7 +155,7 @@ public void setup() throws Exception { conf.setClusterName(testLocalCluster); super.internalSetup(); - namespaces = spy(new Namespaces()); + namespaces = spy(Namespaces.class); namespaces.setServletContext(new MockServletContext()); namespaces.setPulsar(pulsar); doReturn(false).when(namespaces).isRequestHttps(); @@ -771,7 +771,7 @@ public void testDeleteNamespaces() throws Exception { @Test public void testDeleteNamespaceWithBundles() throws Exception { URL localWebServiceUrl = new URL(pulsar.getSafeWebServiceAddress()); - String bundledNsLocal = "test-bundled-namespace-1"; + String bundledNsLocal = "test-delete-namespace-with-bundles"; List boundaries = Lists.newArrayList("0x00000000", "0x80000000", "0xffffffff"); BundlesData bundleData = BundlesData.builder() .boundaries(boundaries) @@ -796,8 +796,8 @@ public boolean matches(NamespaceBundle bundle) { return bundle.getNamespaceObject().equals(testNs); } })); - doReturn(Optional.of(mock(NamespaceEphemeralData.class))).when(nsSvc) - .getOwner(Mockito.argThat(new ArgumentMatcher() { + doReturn(CompletableFuture.completedFuture(Optional.of(mock(NamespaceEphemeralData.class)))).when(nsSvc) + .getOwnerAsync(Mockito.argThat(new ArgumentMatcher() { @Override public boolean matches(NamespaceBundle bundle) { return bundle.getNamespaceObject().equals(testNs); @@ -820,14 +820,14 @@ public boolean matches(NamespaceBundle bundle) { } catch (RestException re) { assertEquals(re.getResponse().getStatus(), Status.PRECONDITION_FAILED.getStatusCode()); } - + NamespaceBundles nsBundles = nsSvc.getNamespaceBundleFactory().getBundles(testNs, bundleData); + doReturn(Optional.empty()).when(nsSvc).getWebServiceUrl(any(NamespaceBundle.class), any(LookupOptions.class)); AsyncResponse response = mock(AsyncResponse.class); namespaces.deleteNamespace(response, 
testTenant, testLocalCluster, bundledNsLocal, false, false); ArgumentCaptor captor = ArgumentCaptor.forClass(RestException.class); verify(response, timeout(5000).times(1)).resume(captor.capture()); assertEquals(captor.getValue().getResponse().getStatus(), Status.PRECONDITION_FAILED.getStatusCode()); - NamespaceBundles nsBundles = nsSvc.getNamespaceBundleFactory().getBundles(testNs, bundleData); // make one bundle owned LookupOptions optionsHttps = LookupOptions.builder().authoritative(false).requestHttps(true).readOnly(false).build(); doReturn(Optional.of(localWebServiceUrl)).when(nsSvc).getWebServiceUrl(nsBundles.getBundles().get(0), optionsHttps); @@ -844,17 +844,16 @@ public boolean matches(NamespaceBundle bundle) { } response = mock(AsyncResponse.class); - namespaces.deleteNamespace(response, testTenant, testLocalCluster, bundledNsLocal, false, false); - captor = ArgumentCaptor.forClass(RestException.class); - verify(response, timeout(5000).times(1)).resume(captor.capture()); - assertEquals(captor.getValue().getResponse().getStatus(), Status.PRECONDITION_FAILED.getStatusCode()); + doReturn(Optional.of(localWebServiceUrl)).when(nsSvc).getWebServiceUrl(any(NamespaceBundle.class), any(LookupOptions.class)); // ensure all three bundles are owned by the local broker for (NamespaceBundle bundle : nsBundles.getBundles()) { - doReturn(Optional.of(localWebServiceUrl)).when(nsSvc).getWebServiceUrl(bundle, optionsHttps); doReturn(true).when(nsSvc).isServiceUnitOwned(bundle); } - doNothing().when(namespacesAdmin).deleteNamespaceBundle(Mockito.anyString(), Mockito.anyString()); + namespaces.deleteNamespace(response, testTenant, testLocalCluster, bundledNsLocal, false, false); + ArgumentCaptor captor2 = ArgumentCaptor.forClass(Response.class); + verify(response, timeout(5000).times(1)).resume(captor2.capture()); + assertEquals(captor2.getValue().getStatus(), Status.NO_CONTENT.getStatusCode()); } @Test @@ -1090,7 +1089,7 @@ public void testValidateTopicOwnership() throws 
Exception { ownership.setAccessible(true); ownership.set(pulsar.getNamespaceService(), MockOwnershipCache); TopicName topicName = TopicName.get(testNs.getPersistentTopicName("my-topic")); - PersistentTopics topics = spy(new PersistentTopics()); + PersistentTopics topics = spy(PersistentTopics.class); topics.setServletContext(new MockServletContext()); topics.setPulsar(pulsar); doReturn(false).when(topics).isRequestHttps(); @@ -1254,7 +1253,7 @@ public void testSubscribeRate() throws Exception { admin.tenants().deleteTenant("my-tenants"); } - class MockLedgerOffloader implements LedgerOffloader { + public static class MockLedgerOffloader implements LedgerOffloader { ConcurrentHashMap offloads = new ConcurrentHashMap(); ConcurrentHashMap deletes = new ConcurrentHashMap(); @@ -1503,6 +1502,8 @@ public void testMaxTopicsPerNamespace() throws Exception { admin.tenants().createTenant("testTenant", tenantInfo); admin.namespaces().createNamespace(namespace, Sets.newHashSet("use")); + assertEquals(0, admin.namespaces().getMaxTopicsPerNamespace(namespace)); + admin.namespaces().setMaxTopicsPerNamespace(namespace, 10); assertEquals(10, admin.namespaces().getMaxTopicsPerNamespace(namespace)); @@ -1738,4 +1739,18 @@ private void assertInvalidRetentionPolicyAsPartOfAllPolicies(Policies policies, assertTrue(e.getMessage().startsWith("Invalid retention policy")); } } + + @Test + public void testSplitBundleForMultiTimes() throws Exception{ + String namespace = BrokerTestUtil.newUniqueName(this.testTenant + "/namespace"); + BundlesData data = BundlesData.builder().numBundles(4).build(); + admin.namespaces().createNamespace(namespace, data); + for (int i = 0; i < 10; i ++) { + final BundlesData bundles = admin.namespaces().getBundles(namespace); + final String bundle = bundles.getBoundaries().get(0) + "_" + bundles.getBoundaries().get(1); + admin.namespaces().splitNamespaceBundle(namespace, bundle, true, null); + } + BundlesData bundles = admin.namespaces().getBundles(namespace); + 
assertEquals(bundles.getNumBundles(), 14); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java index 4a27dbb245122..4591aebc67df3 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/PersistentTopicsTest.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.admin; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; @@ -43,6 +44,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import javax.ws.rs.InternalServerErrorException; import javax.ws.rs.WebApplicationException; import javax.ws.rs.container.AsyncResponse; import javax.ws.rs.core.Response; @@ -52,8 +54,11 @@ import org.apache.pulsar.broker.admin.v2.PersistentTopics; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.authentication.AuthenticationDataHttps; +import org.apache.pulsar.broker.resources.NamespaceResources; import org.apache.pulsar.broker.resources.PulsarResources; import org.apache.pulsar.broker.resources.TopicResources; +import org.apache.pulsar.broker.service.BrokerService; +import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.web.PulsarWebResource; import org.apache.pulsar.broker.web.RestException; import org.apache.pulsar.client.admin.PulsarAdminException; @@ -78,10 +83,9 @@ import org.apache.pulsar.common.policies.data.Policies; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicStats; +import 
org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.zookeeper.KeeperException; import org.mockito.ArgumentCaptor; -import org.powermock.core.classloader.annotations.PowerMockIgnore; -import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.reflect.Whitebox; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -89,8 +93,6 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -@PrepareForTest(PersistentTopics.class) -@PowerMockIgnore("com.sun.management.*") @Slf4j @Test(groups = "broker") public class PersistentTopicsTest extends MockedPulsarServiceBaseTest { @@ -102,6 +104,7 @@ public class PersistentTopicsTest extends MockedPulsarServiceBaseTest { protected Field uriField; protected UriInfo uriInfo; private NonPersistentTopics nonPersistentTopic; + private NamespaceResources namespaceResources; @BeforeClass public void initPersistentTopics() throws Exception { @@ -114,7 +117,7 @@ public void initPersistentTopics() throws Exception { @BeforeMethod protected void setup() throws Exception { super.internalSetup(); - persistentTopics = spy(new PersistentTopics()); + persistentTopics = spy(PersistentTopics.class); persistentTopics.setServletContext(new MockServletContext()); persistentTopics.setPulsar(pulsar); doReturn(false).when(persistentTopics).isRequestHttps(); @@ -124,9 +127,10 @@ protected void setup() throws Exception { doNothing().when(persistentTopics).validateAdminAccessForTenant(this.testTenant); doReturn(mock(AuthenticationDataHttps.class)).when(persistentTopics).clientAuthData(); - nonPersistentTopic = spy(new NonPersistentTopics()); + nonPersistentTopic = spy(NonPersistentTopics.class); nonPersistentTopic.setServletContext(new MockServletContext()); nonPersistentTopic.setPulsar(pulsar); + namespaceResources = mock(NamespaceResources.class); doReturn(false).when(nonPersistentTopic).isRequestHttps(); doReturn(null).when(nonPersistentTopic).originalPrincipal(); 
doReturn("test").when(nonPersistentTopic).clientAppId(); @@ -136,7 +140,8 @@ protected void setup() throws Exception { PulsarResources resources = spy(new PulsarResources(pulsar.getLocalMetadataStore(), pulsar.getConfigurationMetadataStore())); - doReturn(spy(new TopicResources(pulsar.getLocalMetadataStore()))).when(resources).getTopicResources(); + doReturn(spyWithClassAndConstructorArgs(TopicResources.class, + pulsar.getLocalMetadataStore())).when(resources).getTopicResources(); Whitebox.setInternalState(pulsar, "pulsarResources", resources); admin.clusters().createCluster("use", ClusterData.builder().serviceUrl("http://broker-use.com:8080").build()); @@ -408,6 +413,33 @@ public void testCreateNonPartitionedTopic() { Assert.assertEquals(metadata.partitions, 0); } + @Test + public void testCreateTopicWithReplicationCluster() { + final String topicName = "test-topic-ownership"; + NamespaceName namespaceName = NamespaceName.get(testTenant, testNamespace); + CompletableFuture> policyFuture = new CompletableFuture<>(); + Policies policies = new Policies(); + policyFuture.complete(Optional.of(policies)); + when(pulsar.getPulsarResources().getNamespaceResources()).thenReturn(namespaceResources); + doReturn(policyFuture).when(namespaceResources).getPoliciesAsync(namespaceName); + AsyncResponse response = mock(AsyncResponse.class); + ArgumentCaptor errCaptor = ArgumentCaptor.forClass(RestException.class); + persistentTopics.createPartitionedTopic(response, testTenant, testNamespace, topicName, 2, true); + verify(response, timeout(5000).times(1)).resume(errCaptor.capture()); + Assert.assertEquals(errCaptor.getValue().getResponse().getStatus(), Response.Status.PRECONDITION_FAILED.getStatusCode()); + Assert.assertTrue(errCaptor.getValue().getMessage().contains("Namespace does not have any clusters configured")); + // Test policy not exist and return 'Namespace not found' + CompletableFuture> policyFuture2 = new CompletableFuture<>(); + 
policyFuture2.complete(Optional.empty()); + doReturn(policyFuture2).when(namespaceResources).getPoliciesAsync(namespaceName); + response = mock(AsyncResponse.class); + errCaptor = ArgumentCaptor.forClass(RestException.class); + persistentTopics.createPartitionedTopic(response, testTenant, testNamespace, topicName, 2, true); + verify(response, timeout(5000).times(1)).resume(errCaptor.capture()); + Assert.assertEquals(errCaptor.getValue().getResponse().getStatus(), Response.Status.NOT_FOUND.getStatusCode()); + Assert.assertTrue(errCaptor.getValue().getMessage().contains("Namespace not found")); + } + @Test(expectedExceptions = RestException.class) public void testCreateNonPartitionedTopicWithInvalidName() { final String topicName = "standard-topic-partition-10"; @@ -707,7 +739,7 @@ public void testGetBacklogSizeByMessageId() throws Exception{ completableFuture = batchProducer.sendAsync("a".getBytes()); } completableFuture.get(); - Assert.assertEquals(Optional.ofNullable(admin.topics().getBacklogSizeByMessageId(topicName + "-partition-0", MessageId.earliest)), Optional.of(350L)); + Assert.assertEquals(Optional.ofNullable(admin.topics().getBacklogSizeByMessageId(topicName + "-partition-0", MessageId.earliest)), Optional.of(320L)); } @Test @@ -915,6 +947,7 @@ public void testSetReplicatedSubscriptionStatus() { Assert.assertEquals(responseCaptor.getValue().getStatus(), Response.Status.NO_CONTENT.getStatusCode()); } + @Test public void testGetMessageById() throws Exception { TenantInfoImpl tenantInfo = new TenantInfoImpl(Sets.newHashSet("role1", "role2"), Sets.newHashSet("test")); admin.tenants().createTenant("tenant-xyz", tenantInfo); @@ -1087,4 +1120,51 @@ public void onSendAcknowledgement(Producer producer, Message message, MessageId Assert.assertTrue(admin.topics().getMessageIdByTimestamp(topicName, publish2 + 1) .compareTo(id2) > 0); } + + @Test + public void testDeleteTopic() throws Exception { + final String topicName = "topic-1"; + BrokerService brokerService = 
spy(pulsar.getBrokerService()); + doReturn(brokerService).when(pulsar).getBrokerService(); + persistentTopics.createNonPartitionedTopic(testTenant, testNamespace, topicName, false); + CompletableFuture deleteTopicFuture = new CompletableFuture<>(); + deleteTopicFuture.completeExceptionally(new MetadataStoreException.NotFoundException()); + doReturn(deleteTopicFuture).when(brokerService).deleteTopic(anyString(), anyBoolean(), anyBoolean()); + persistentTopics.deleteTopic(testTenant, testNamespace, topicName, true, true, true); + // + CompletableFuture deleteTopicFuture2 = new CompletableFuture<>(); + deleteTopicFuture2.completeExceptionally(new MetadataStoreException("test exception")); + doReturn(deleteTopicFuture2).when(brokerService).deleteTopic(anyString(), anyBoolean(), anyBoolean()); + try { + persistentTopics.deleteTopic(testTenant, testNamespace, topicName, true, true, true); + } catch (Exception e) { + Assert.assertTrue(e instanceof RestException); + } + // + CompletableFuture deleteTopicFuture3 = new CompletableFuture<>(); + deleteTopicFuture3.completeExceptionally(new MetadataStoreException.NotFoundException()); + doReturn(deleteTopicFuture3).when(brokerService).deleteTopic(anyString(), anyBoolean(), anyBoolean()); + try { + persistentTopics.deleteTopic(testTenant, testNamespace, topicName, false, true, true); + } catch (RestException e) { + Assert.assertEquals(e.getResponse().getStatus(), 404); + } + } + + @Test + public void testResetCursorReturnTimeoutWhenZKTimeout() { + String topic = "persistent://" + testTenant + "/" + testNamespace + "/" + "topic-2"; + BrokerService brokerService = spy(pulsar.getBrokerService()); + doReturn(brokerService).when(pulsar).getBrokerService(); + CompletableFuture> completableFuture = new CompletableFuture<>(); + doReturn(completableFuture).when(brokerService).getTopicIfExists(topic); + try { + admin.topics().resetCursor(topic, "my-sub", System.currentTimeMillis()); + Assert.fail(); + } catch (PulsarAdminException e) { + 
String errorMsg = ((InternalServerErrorException) e.getCause()).getResponse().readEntity(String.class); + Assert.assertTrue(errorMsg.contains("TimeoutException")); + } + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/ResourceGroupsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/ResourceGroupsTest.java index b6510a18995bf..4910f6798bae4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/ResourceGroupsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/ResourceGroupsTest.java @@ -52,7 +52,7 @@ public class ResourceGroupsTest extends MockedPulsarServiceBaseTest { @Override protected void setup() throws Exception { super.internalSetup(); - resourcegroups = spy(new ResourceGroups()); + resourcegroups = spy(ResourceGroups.class); resourcegroups.setServletContext(new MockServletContext()); resourcegroups.setPulsar(pulsar); doReturn(false).when(resourcegroups).isRequestHttps(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java index 59494fb799d0c..7af136a0f97f2 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicPoliciesTest.java @@ -44,7 +44,9 @@ import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.service.BacklogQuotaManager; +import org.apache.pulsar.broker.service.AbstractTopic; import org.apache.pulsar.broker.service.PublishRateLimiterImpl; +import org.apache.pulsar.broker.service.SystemTopicBasedTopicPoliciesService; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.DispatchRateLimiter; import org.apache.pulsar.broker.service.persistent.PersistentTopic; @@ -63,6 +65,8 @@ import 
org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.common.api.proto.CommandSubscribe; import org.apache.pulsar.common.events.EventsTopicNames; +import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.BacklogQuota; import org.apache.pulsar.common.policies.data.ClusterData; @@ -125,6 +129,47 @@ public void cleanup() throws Exception { this.resetConfig(); } + @Test + public void testTopicPolicyInitialValueWithNamespaceAlreadyLoaded() throws Exception{ + TopicName topicName = TopicName.get( + TopicDomain.persistent.value(), + NamespaceName.get(myNamespace), + "test-" + UUID.randomUUID() + ); + String topic = topicName.toString(); + + SystemTopicBasedTopicPoliciesService policyService = + (SystemTopicBasedTopicPoliciesService) pulsar.getTopicPoliciesService(); + + //set up topic with inactiveTopicPolicies.maxInactiveDurationSeconds = 100 + InactiveTopicPolicies inactiveTopicPolicies = + new InactiveTopicPolicies(InactiveTopicDeleteMode.delete_when_subscriptions_caught_up, 100, true); + admin.topics().createNonPartitionedTopic(topic); + admin.topicPolicies().setInactiveTopicPolicies(topic, inactiveTopicPolicies); + + //wait until topic loaded with right policy value. + Awaitility.await().untilAsserted(()-> { + AbstractTopic topic1 = (AbstractTopic) pulsar.getBrokerService().getTopic(topic, true).get().get(); + assertEquals(topic1.getInactiveTopicPolicies().getMaxInactiveDurationSeconds(), 100); + }); + //unload the topic + pulsar.getNamespaceService().unloadNamespaceBundle(pulsar.getNamespaceService().getBundle(topicName)).get(); + assertFalse(pulsar.getBrokerService().getTopics().containsKey(topic)); + + //load the nameserver, but topic is not init. 
+ log.info("lookup:{}",admin.lookups().lookupTopic(topic)); + assertTrue(pulsar.getBrokerService().isTopicNsOwnedByBroker(topicName)); + assertFalse(pulsar.getBrokerService().getTopics().containsKey(topic)); + //make sure namespace policy reader is fully started. + Awaitility.await().untilAsserted(()-> { + assertTrue(policyService.getPoliciesCacheInit(topicName.getNamespaceObject())); + }); + + //load the topic. + AbstractTopic topic1 = (AbstractTopic) pulsar.getBrokerService().getTopic(topic, true).get().get(); + assertEquals(topic1.getInactiveTopicPolicies().getMaxInactiveDurationSeconds(), 100); + } + @Test public void testSetSizeBasedBacklogQuota() throws Exception { @@ -1397,7 +1442,7 @@ public void testRemovePublishRate() throws Exception { @Test public void testCheckMaxConsumers() throws Exception { - Integer maxProducers = new Integer(-1); + Integer maxProducers = -1; log.info("MaxConsumers: {} will set to the topic: {}", maxProducers, testTopic); try { admin.topics().setMaxConsumers(testTopic, maxProducers); @@ -2643,4 +2688,27 @@ public void testDoNotCreateSystemTopicForHeartbeatNamespace() { }); } + @Test + public void testLoopCreateAndDeleteTopicPolicies() throws Exception { + final String topic = testTopic + UUID.randomUUID(); + + int n = 0; + while (n < 2) { + n++; + pulsarClient.newProducer().topic(topic).create().close(); + Awaitility.await().untilAsserted(() -> { + Assertions.assertThat(pulsar.getTopicPoliciesService().getTopicPolicies(TopicName.get(topic))).isNull(); + }); + + admin.topics().setMaxConsumersPerSubscription(topic, 1); + Awaitility.await().untilAsserted(() -> { + Assertions.assertThat(pulsar.getTopicPoliciesService().getTopicPolicies(TopicName.get(topic))).isNotNull(); + }); + + admin.topics().delete(topic); + Awaitility.await().untilAsserted(() -> { + Assertions.assertThat(pulsar.getTopicPoliciesService().getTopicPolicies(TopicName.get(topic))).isNull(); + }); + } + } } diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsAuthTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsAuthTest.java new file mode 100644 index 0000000000000..185053ed7029b --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsAuthTest.java @@ -0,0 +1,193 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.admin; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.google.common.collect.Sets; +import io.jsonwebtoken.Jwts; +import io.jsonwebtoken.SignatureAlgorithm; +import java.util.Base64; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import javax.crypto.SecretKey; +import javax.ws.rs.client.Client; +import javax.ws.rs.client.ClientBuilder; +import javax.ws.rs.client.Entity; +import javax.ws.rs.client.WebTarget; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; +import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminBuilder; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; +import org.apache.pulsar.client.impl.schema.StringSchema; +import org.apache.pulsar.common.policies.data.AuthAction; +import org.apache.pulsar.common.policies.data.ClusterDataImpl; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.util.ObjectMapperFactory; +import org.apache.pulsar.websocket.data.ProducerMessage; +import org.apache.pulsar.websocket.data.ProducerMessages; +import org.glassfish.jersey.client.ClientConfig; +import org.glassfish.jersey.client.ClientProperties; +import org.glassfish.jersey.media.multipart.MultiPartFeature; +import org.mockito.Mockito; +import org.testng.Assert; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +public class TopicsAuthTest extends MockedPulsarServiceBaseTest { + + private final String testLocalCluster = "test"; + private final String 
testTenant = "my-tenant"; + private final String testNamespace = "my-namespace"; + private final String testTopicName = "my-topic"; + + private static final SecretKey SECRET_KEY = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + private static final String ADMIN_TOKEN = Jwts.builder().setSubject("admin").signWith(SECRET_KEY).compact(); + private static final String PRODUCE_TOKEN = Jwts.builder().setSubject("producer").signWith(SECRET_KEY).compact(); + private static final String CONSUME_TOKEN = Jwts.builder().setSubject("consumer").signWith(SECRET_KEY).compact(); + + @Override + @BeforeMethod + protected void setup() throws Exception { + // enable auth&auth and use JWT at broker + conf.setAuthenticationEnabled(true); + conf.setAuthorizationEnabled(true); + conf.getProperties().setProperty("tokenSecretKey", "data:;base64," + + Base64.getEncoder().encodeToString(SECRET_KEY.getEncoded())); + Set superUserRoles = new HashSet<>(); + superUserRoles.add("admin"); + conf.setSuperUserRoles(superUserRoles); + Set providers = new HashSet<>(); + providers.add(AuthenticationProviderToken.class.getName()); + conf.setAuthenticationProviders(providers); + super.internalSetup(); + PulsarAdminBuilder pulsarAdminBuilder = PulsarAdmin.builder().serviceHttpUrl(brokerUrl != null + ? 
brokerUrl.toString() : brokerUrlTls.toString()) + .authentication(AuthenticationToken.class.getName(), + ADMIN_TOKEN); + admin = Mockito.spy(pulsarAdminBuilder.build()); + admin.clusters().createCluster(testLocalCluster, new ClusterDataImpl()); + admin.tenants().createTenant(testTenant, new TenantInfoImpl(Sets.newHashSet("role1", "role2"), + Sets.newHashSet(testLocalCluster))); + admin.namespaces().createNamespace(testTenant + "/" + testNamespace, + Sets.newHashSet(testLocalCluster)); + admin.namespaces().grantPermissionOnNamespace(testTenant + "/" + testNamespace, "producer", + EnumSet.of(AuthAction.produce)); + admin.namespaces().grantPermissionOnNamespace(testTenant + "/" + testNamespace, "consumer", + EnumSet.of(AuthAction.consume)); + } + + @Override + @AfterMethod + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @DataProvider(name = "variations") + public static Object[][] variations() { + return new Object[][]{ + {CONSUME_TOKEN, 401}, + {PRODUCE_TOKEN, 200} + }; + } + + @Test(dataProvider = "variations") + public void testProduceToNonPartitionedTopic(String token, int status) throws Exception { + innerTestProduce(testTopicName, true, false, token, status); + } + + @Test(dataProvider = "variations") + public void testProduceToPartitionedTopic(String token, int status) throws Exception { + innerTestProduce(testTopicName, true, true, token, status); + } + + @Test(dataProvider = "variations") + public void testProduceOnNonPersistentNonPartitionedTopic(String token, int status) throws Exception { + innerTestProduce(testTopicName, false, false, token, status); + } + + @Test(dataProvider = "variations") + public void testProduceOnNonPersistentPartitionedTopic(String token, int status) throws Exception { + innerTestProduce(testTopicName, false, true, token, status); + } + + private void innerTestProduce(String createTopicName, boolean isPersistent, boolean isPartition, + String token, int status) throws Exception { + String 
topicPrefix = null; + if (isPersistent == true) { + topicPrefix = "persistent"; + } else { + topicPrefix = "non-persistent"; + } + if (isPartition == true) { + admin.topics().createPartitionedTopic(topicPrefix + "://" + testTenant + "/" + + testNamespace + "/" + createTopicName, 5); + } else { + admin.topics().createNonPartitionedTopic(topicPrefix + "://" + testTenant + "/" + + testNamespace + "/" + createTopicName); + } + Schema schema = StringSchema.utf8(); + ProducerMessages producerMessages = new ProducerMessages(); + producerMessages.setKeySchema(ObjectMapperFactory.getThreadLocal(). + writeValueAsString(schema.getSchemaInfo())); + producerMessages.setValueSchema(ObjectMapperFactory.getThreadLocal(). + writeValueAsString(schema.getSchemaInfo())); + String message = "[" + + "{\"key\":\"my-key\",\"payload\":\"RestProducer:1\",\"eventTime\":1603045262772,\"sequenceId\":1}," + + "{\"key\":\"my-key\",\"payload\":\"RestProducer:2\",\"eventTime\":1603045262772,\"sequenceId\":2}]"; + producerMessages.setMessages(ObjectMapperFactory.getThreadLocal().readValue(message, + new TypeReference>() { + })); + + WebTarget root = buildWebClient(); + String requestPath = null; + if (isPartition == true) { + requestPath = "/topics/" + topicPrefix + "/" + testTenant + "/" + testNamespace + "/" + + createTopicName + "/partitions/2"; + } else { + requestPath = "/topics/" + topicPrefix + "/" + testTenant + "/" + testNamespace + "/" + createTopicName; + } + + Response response = root.path(requestPath) + .request(MediaType.APPLICATION_JSON) + .header("Authorization", "Bearer " + token) + .post(Entity.json(producerMessages)); + Assert.assertEquals(response.getStatus(), status); + } + + WebTarget buildWebClient() throws Exception { + ClientConfig httpConfig = new ClientConfig(); + httpConfig.property(ClientProperties.FOLLOW_REDIRECTS, true); + httpConfig.property(ClientProperties.ASYNC_THREADPOOL_SIZE, 8); + httpConfig.register(MultiPartFeature.class); + + javax.ws.rs.client.ClientBuilder 
clientBuilder = ClientBuilder.newBuilder().withConfig(httpConfig); + Client client = clientBuilder.build(); + return client.target(brokerUrl.toString()); + } + +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsTest.java index 3f57806c17767..6466c9495c16e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/TopicsTest.java @@ -25,13 +25,11 @@ import lombok.NoArgsConstructor; import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericRecord; -import org.apache.avro.io.BinaryEncoder; import org.apache.avro.io.EncoderFactory; import org.apache.avro.io.JsonEncoder; import org.apache.avro.reflect.ReflectDatumWriter; import org.apache.avro.util.Utf8; import org.apache.pulsar.broker.PulsarService; -import org.apache.pulsar.broker.admin.v2.PersistentTopics; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.authentication.AuthenticationDataHttps; import org.apache.pulsar.broker.namespace.NamespaceService; @@ -70,7 +68,6 @@ import org.mockito.ArgumentCaptor; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.reflect.Whitebox; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -80,6 +77,7 @@ import javax.ws.rs.core.Response; import javax.ws.rs.core.UriInfo; import java.io.ByteArrayOutputStream; +import java.net.URI; import java.util.Arrays; import java.util.List; import java.util.Optional; @@ -97,7 +95,6 @@ import static org.powermock.api.mockito.PowerMockito.mock; import static org.powermock.api.mockito.PowerMockito.spy; -@PrepareForTest(PersistentTopics.class) public class TopicsTest extends MockedPulsarServiceBaseTest { private Topics topics; @@ -317,13 +314,13 
@@ public Object answer(InvocationOnMock invocationOnMock) throws Throwable { @Test public void testLookUpWithRedirect() throws Exception { String topicName = "persistent://" + testTenant + "/" + testNamespace + "/" + testTopicName; - String requestPath = "/admin/v3/topics/my-tenant/my-namespace/my-topic"; + URI requestPath = URI.create(pulsar.getWebServiceAddress() + "/topics/my-tenant/my-namespace/my-topic"); //create topic on one broker admin.topics().createNonPartitionedTopic(topicName); PulsarService pulsar2 = startBroker(getDefaultConf()); doReturn(false).when(topics).isRequestHttps(); UriInfo uriInfo = mock(UriInfo.class); - doReturn(requestPath).when(uriInfo).getPath(anyBoolean()); + doReturn(requestPath).when(uriInfo).getRequestUri(); Whitebox.setInternalState(topics, "uri", uriInfo); //do produce on another broker topics.setPulsar(pulsar2); @@ -340,8 +337,7 @@ public void testLookUpWithRedirect() throws Exception { // Verify got redirect response Assert.assertEquals(responseCaptor.getValue().getStatusInfo(), Response.Status.TEMPORARY_REDIRECT); // Verify URI point to address of broker the topic was created on - Assert.assertEquals(responseCaptor.getValue().getLocation().toString(), - pulsar.getWebServiceAddress() + requestPath); + Assert.assertEquals(responseCaptor.getValue().getLocation().toString(), requestPath.toString()); } @Test diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest2.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApi2Test.java similarity index 99% rename from pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest2.java rename to pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApi2Test.java index 4f9c8603a9e9a..2eb37c222d614 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest2.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApi2Test.java @@ -73,7 +73,7 @@ 
import org.testng.annotations.Test; @Test(groups = "broker") -public class V1_AdminApiTest2 extends MockedPulsarServiceBaseTest { +public class V1_AdminApi2Test extends MockedPulsarServiceBaseTest { private MockedPulsarService mockPulsarSetup; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java index 06d066f4c38b2..16bf0553345da 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v1/V1_AdminApiTest.java @@ -650,6 +650,7 @@ public void namespaces() throws Exception { Policies policies = new Policies(); policies.bundles = PoliciesUtil.defaultBundle(); policies.auth_policies.getNamespaceAuthentication().put("my-role", EnumSet.allOf(AuthAction.class)); + policies.is_allow_auto_update_schema = conf.isAllowAutoUpdateSchemaEnabled(); assertEquals(admin.namespaces().getPolicies("prop-xyz/use/ns1"), policies); assertEquals(admin.namespaces().getPermissions("prop-xyz/use/ns1"), policies.auth_policies.getNamespaceAuthentication()); @@ -658,6 +659,7 @@ public void namespaces() throws Exception { admin.namespaces().revokePermissionsOnNamespace("prop-xyz/use/ns1", "my-role"); policies.auth_policies.getNamespaceAuthentication().remove("my-role"); + policies.is_allow_auto_update_schema = conf.isAllowAutoUpdateSchemaEnabled(); assertEquals(admin.namespaces().getPolicies("prop-xyz/use/ns1"), policies); assertNull(admin.namespaces().getPersistence("prop-xyz/use/ns1")); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionMultiBrokerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionMultiBrokerTest.java new file mode 100644 index 0000000000000..add277fc524d7 --- /dev/null +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionMultiBrokerTest.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.admin.v3; + +import java.util.Map; +import java.util.concurrent.TimeUnit; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.transaction.TransactionTestBase; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.common.naming.TopicName; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker-admin") +public class AdminApiTransactionMultiBrokerTest extends TransactionTestBase { + + private static final int NUM_BROKERS = 16; + private static final int NUM_PARTITIONS = 3; + + @BeforeMethod + protected void setup() throws Exception { + setUpBase(NUM_BROKERS, NUM_PARTITIONS, NAMESPACE1 + "/test", 0); + } + + @AfterMethod(alwaysRun = true) + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testRedirectOfGetCoordinatorInternalStats() throws Exception { + Map map = admin.lookups() + 
.lookupPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString()); + while (map.containsValue(getPulsarServiceList().get(0).getBrokerServiceUrl())) { + admin.topics().deletePartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString()); + admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), NUM_PARTITIONS); + map = admin.lookups().lookupPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString()); + } + //init tc stores + pulsarClient = PulsarClient.builder() + .serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()) + .statsInterval(0, TimeUnit.SECONDS) + .enableTransaction(true) + .build(); + for (int i = 0; i < NUM_PARTITIONS; i++) { + admin.transactions().getCoordinatorInternalStats(i, false); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionTest.java index 93a2d0e188e17..ede74be63d868 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/AdminApiTransactionTest.java @@ -20,8 +20,10 @@ import com.google.common.collect.Sets; import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.http.HttpStatus; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckStore; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; @@ -56,13 +58,16 @@ import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.util.Map; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import static org.testng.Assert.assertEquals; import static 
org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; +@Test(groups = "broker-admin") public class AdminApiTransactionTest extends MockedPulsarServiceBaseTest { @BeforeMethod @@ -119,6 +124,25 @@ public void testGetTransactionInBufferStats() throws Exception { initTransaction(2); TransactionImpl transaction = (TransactionImpl) getTransaction(); final String topic = "persistent://public/default/testGetTransactionInBufferStats"; + try { + admin.transactions() + .getTransactionInBufferStatsAsync(new TxnID(1, 1), topic).get(); + fail("Should failed here"); + } catch (ExecutionException ex) { + assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); + PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); + assertEquals(cause.getMessage(), "Topic not found"); + } + try { + pulsar.getBrokerService().getTopic(topic, false); + admin.transactions() + .getTransactionInBufferStatsAsync(new TxnID(1, 1), topic).get(); + fail("Should failed here"); + } catch (ExecutionException ex) { + assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); + PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); + assertEquals(cause.getMessage(), "Topic not found"); + } admin.topics().createNonPartitionedTopic(topic); Producer producer = pulsarClient.newProducer(Schema.BYTES).topic(topic).sendTimeout(0, TimeUnit.SECONDS).create(); MessageId messageId = producer.newMessage(transaction).value("Hello pulsar!".getBytes()).send(); @@ -144,6 +168,27 @@ public void testGetTransactionPendingAckStats() throws Exception { initTransaction(2); final String topic = "persistent://public/default/testGetTransactionInBufferStats"; final String subName = "test"; + try { + admin.transactions() + 
.getTransactionInPendingAckStatsAsync(new TxnID(1, + 2), topic, subName).get(); + fail("Should failed here"); + } catch (ExecutionException ex) { + assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); + PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); + assertEquals(cause.getMessage(), "Topic not found"); + } + try { + pulsar.getBrokerService().getTopic(topic, false); + admin.transactions() + .getTransactionInPendingAckStatsAsync(new TxnID(1, + 2), topic, subName).get(); + fail("Should failed here"); + } catch (ExecutionException ex) { + assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); + PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); + assertEquals(cause.getMessage(), "Topic not found"); + } admin.topics().createNonPartitionedTopic(topic); Producer producer = pulsarClient.newProducer(Schema.BYTES).topic(topic).create(); Consumer consumer = pulsarClient.newConsumer(Schema.BYTES).topic(topic) @@ -250,8 +295,26 @@ public void testGetTransactionBufferStats() throws Exception { final String topic = "persistent://public/default/testGetTransactionBufferStats"; final String subName1 = "test1"; final String subName2 = "test2"; + try { + admin.transactions() + .getTransactionBufferStatsAsync(topic).get(); + fail("Should failed here"); + } catch (ExecutionException ex) { + assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); + PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); + assertEquals(cause.getMessage(), "Topic not found"); + } + try { + pulsar.getBrokerService().getTopic(topic, false); + admin.transactions() + .getTransactionBufferStatsAsync(topic).get(); + fail("Should failed here"); + } catch (ExecutionException ex) { + assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); + PulsarAdminException.NotFoundException 
cause = (PulsarAdminException.NotFoundException)ex.getCause(); + assertEquals(cause.getMessage(), "Topic not found"); + } admin.topics().createNonPartitionedTopic(topic); - Producer producer = pulsarClient.newProducer(Schema.BYTES) .sendTimeout(0, TimeUnit.SECONDS).topic(topic).create(); Consumer consumer1 = pulsarClient.newConsumer(Schema.BYTES).topic(topic) @@ -287,6 +350,25 @@ public void testGetPendingAckStats(String ackType) throws Exception { initTransaction(2); final String topic = "persistent://public/default/testGetPendingAckStats"; final String subName = "test1"; + try { + admin.transactions() + .getPendingAckStatsAsync(topic, subName).get(); + fail("Should failed here"); + } catch (ExecutionException ex) { + assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); + PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); + assertEquals(cause.getMessage(), "Topic not found"); + } + try { + pulsar.getBrokerService().getTopic(topic, false); + admin.transactions() + .getPendingAckStatsAsync(topic, subName).get(); + fail("Should failed here"); + } catch (ExecutionException ex) { + assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); + PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); + assertEquals(cause.getMessage(), "Topic not found"); + } admin.topics().createNonPartitionedTopic(topic); Producer producer = pulsarClient.newProducer(Schema.BYTES) @@ -374,6 +456,25 @@ public void testGetPendingAckInternalStats() throws Exception { TransactionImpl transaction = (TransactionImpl) getTransaction(); final String topic = "persistent://public/default/testGetPendingAckInternalStats"; final String subName = "test"; + try { + admin.transactions() + .getPendingAckInternalStatsAsync(topic, subName, true).get(); + fail("Should failed here"); + } catch (ExecutionException ex) { + assertTrue(ex.getCause() instanceof 
PulsarAdminException.NotFoundException); + PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); + assertEquals(cause.getMessage(), "Topic not found"); + } + try { + pulsar.getBrokerService().getTopic(topic, false); + admin.transactions() + .getPendingAckInternalStatsAsync(topic, subName, true).get(); + fail("Should failed here"); + } catch (ExecutionException ex) { + assertTrue(ex.getCause() instanceof PulsarAdminException.NotFoundException); + PulsarAdminException.NotFoundException cause = (PulsarAdminException.NotFoundException)ex.getCause(); + assertEquals(cause.getMessage(), "Topic not found"); + } admin.topics().createNonPartitionedTopic(topic); Producer producer = pulsarClient.newProducer(Schema.BYTES).topic(topic).create(); Consumer consumer = pulsarClient.newConsumer(Schema.BYTES).topic(topic) @@ -408,6 +509,18 @@ public void testGetPendingAckInternalStats() throws Exception { assertNull(managedLedgerInternalStats.ledgers.get(0).metadata); } + @Test(timeOut = 20000) + public void testTransactionNotEnabled() throws Exception { + stopBroker(); + conf.setTransactionCoordinatorEnabled(false); + super.internalSetup(); + try { + admin.transactions().getCoordinatorInternalStats(1, false); + } catch (PulsarAdminException ex) { + assertEquals(ex.getStatusCode(), HttpStatus.SC_SERVICE_UNAVAILABLE); + } + } + private static void verifyCoordinatorStats(String state, long sequenceId, long lowWaterMark) { assertEquals(state, "Ready"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/PackagesApiNotEnabledTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/PackagesApiNotEnabledTest.java new file mode 100644 index 0000000000000..becddb173b269 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/PackagesApiNotEnabledTest.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.admin.v3; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.fail; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Comparator; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.packages.management.core.common.PackageMetadata; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class PackagesApiNotEnabledTest extends MockedPulsarServiceBaseTest { + + @BeforeMethod + @Override + protected void setup() throws Exception { + // not enable Package Management Service + conf.setEnablePackagesManagement(false); + super.internalSetup(); + } + + @AfterMethod(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test(timeOut = 60000) + public void testPackagesOperationsWithoutPackagesServiceEnabled() throws Exception { + // download package api should return 503 Service Unavailable exception + String unknownPackageName = "function://public/default/unknown@v1"; + Path tmp = 
Files.createTempDirectory("package-test-tmp"); + try { + admin.packages().download(unknownPackageName, tmp.toAbsolutePath().toString() + "/unknown"); + fail("should throw 503 error"); + } catch (PulsarAdminException e) { + assertEquals(503, e.getStatusCode()); + } finally { + Files.walk(tmp).sorted(Comparator.reverseOrder()).forEach(p -> { + try { + Files.delete(p); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + + // get metadata api should return 503 Service Unavailable exception + try { + admin.packages().getMetadata(unknownPackageName); + fail("should throw 503 error"); + } catch (PulsarAdminException e) { + assertEquals(503, e.getStatusCode()); + } + + // update metadata api should return 503 Service Unavailable exception + try { + admin.packages().updateMetadata(unknownPackageName, + PackageMetadata.builder().description("unknown").build()); + fail("should throw 503 error"); + } catch (PulsarAdminException e) { + assertEquals(503, e.getStatusCode()); + } + + // list all the packages api should return 503 Service Unavailable exception + try { + admin.packages().listPackages("function", "unknown/unknown"); + fail("should throw 503 error"); + } catch (PulsarAdminException e) { + assertEquals(503, e.getStatusCode()); + } + + // list all the versions api should return 503 Service Unavailable exception + try { + admin.packages().listPackageVersions(unknownPackageName); + fail("should throw 503 error"); + } catch (PulsarAdminException e) { + assertEquals(503, e.getStatusCode()); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/PackagesApiTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/PackagesApiTest.java index 05111c50b9ba8..79efb42f63c21 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/PackagesApiTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/admin/v3/PackagesApiTest.java @@ -32,7 +32,11 @@ import org.testng.annotations.Test; 
import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.Collections; +import java.util.Comparator; import java.util.List; @Test(groups = "broker") @@ -101,18 +105,30 @@ public void testPackagesOperations() throws Exception { } @Test(timeOut = 60000) - public void testPackagesOperationsFailed() { + public void testPackagesOperationsFailed() throws IOException { // download a non-existent package should return not found exception String unknownPackageName = "function://public/default/unknown@v1"; + + Path tmp = Files.createTempDirectory("package-test-tmp"); try { - admin.packages().download(unknownPackageName, "/test/unknown"); + admin.packages().download(unknownPackageName, tmp.toAbsolutePath() + "/unknown"); + fail("should throw 404 error"); } catch (PulsarAdminException e) { assertEquals(404, e.getStatusCode()); + } finally { + Files.walk(tmp).sorted(Comparator.reverseOrder()).forEach(p -> { + try { + Files.delete(p); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); } // get the metadata of a non-existent package should return not found exception try { admin.packages().getMetadata(unknownPackageName); + fail("should throw 404 error"); } catch (PulsarAdminException e) { assertEquals(404, e.getStatusCode()); } @@ -121,6 +137,7 @@ public void testPackagesOperationsFailed() { try { admin.packages().updateMetadata(unknownPackageName, PackageMetadata.builder().description("unknown").build()); + fail("should throw 404 error"); } catch (PulsarAdminException e) { assertEquals(404, e.getStatusCode()); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthorizationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthorizationTest.java index dcdc602985b64..2596d243a9f87 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthorizationTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/AuthorizationTest.java @@ -18,24 +18,25 @@ */ package org.apache.pulsar.broker.auth; +import static org.mockito.Mockito.when; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - import java.util.EnumSet; - import org.apache.pulsar.broker.authorization.AuthorizationService; +import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminBuilder; +import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.AuthAction; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.ClusterDataImpl; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.SubscriptionAuthMode; +import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; - import com.google.common.collect.Sets; @Test(groups = "flaky") @@ -201,7 +202,12 @@ public void simple() throws Exception { // tests for subscription auth mode admin.namespaces().grantPermissionOnNamespace("p1/c1/ns1", "*", EnumSet.of(AuthAction.consume)); + admin.namespaces().setSubscriptionAuthMode("p1/c1/ns1", SubscriptionAuthMode.None); + Assert.assertEquals(admin.namespaces().getSubscriptionAuthMode("p1/c1/ns1"), + SubscriptionAuthMode.None); admin.namespaces().setSubscriptionAuthMode("p1/c1/ns1", SubscriptionAuthMode.Prefix); + Assert.assertEquals(admin.namespaces().getSubscriptionAuthMode("p1/c1/ns1"), + SubscriptionAuthMode.Prefix); waitForChange(); assertTrue(auth.canLookup(TopicName.get("persistent://p1/c1/ns1/ds1"), "role1", null)); @@ -224,6 +230,27 @@ public void simple() throws Exception { admin.clusters().deleteCluster("c1"); } + @Test + public void testGetListWithGetBundleOp() throws 
Exception { + String tenant = "p1"; + String namespaceV1 = "p1/global/ns1"; + String namespaceV2 = "p1/ns2"; + admin.clusters().createCluster("c1", ClusterData.builder().build()); + admin.tenants().createTenant(tenant, new TenantInfoImpl(Sets.newHashSet("role1"), Sets.newHashSet("c1"))); + admin.namespaces().createNamespace(namespaceV1, Sets.newHashSet("c1")); + admin.namespaces().grantPermissionOnNamespace(namespaceV1, "pass.pass2", EnumSet.of(AuthAction.produce)); + admin.namespaces().createNamespace(namespaceV2, Sets.newHashSet("c1")); + admin.namespaces().grantPermissionOnNamespace(namespaceV2, "pass.pass2", EnumSet.of(AuthAction.produce)); + PulsarAdmin admin2 = PulsarAdmin.builder().serviceHttpUrl(brokerUrl != null + ? brokerUrl.toString() + : brokerUrlTls.toString()) + .authentication(new MockAuthentication("pass.pass2")) + .build(); + when(pulsar.getAdminClient()).thenReturn(admin2); + Assert.assertEquals(admin2.topics().getList(namespaceV1, TopicDomain.non_persistent).size(), 0); + Assert.assertEquals(admin2.topics().getList(namespaceV2, TopicDomain.non_persistent).size(), 0); + } + private static void waitForChange() { try { Thread.sleep(100); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java index 49e7ef30fa70c..27ac9cb869dca 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/auth/MockedPulsarServiceBaseTest.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.auth; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -59,7 +60,10 @@ import org.apache.pulsar.client.api.PulsarClient; import 
org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfo; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.metadata.api.MetadataStoreException; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; import org.apache.pulsar.metadata.impl.ZKMetadataStore; import org.apache.pulsar.tests.TestRetrySupport; import org.apache.pulsar.zookeeper.ZooKeeperClientFactory; @@ -245,6 +249,11 @@ protected final void internalCleanup() throws Exception { } bkExecutor = null; } + onCleanup(); + } + + protected void onCleanup() { + } protected abstract void setup() throws Exception; @@ -317,10 +326,11 @@ protected void setupBrokerMocks(PulsarService pulsar) throws Exception { // Override default providers with mocked ones doReturn(mockZooKeeperClientFactory).when(pulsar).getZooKeeperClientFactory(); doReturn(mockBookKeeperClientFactory).when(pulsar).newBookKeeperClientFactory(); - doReturn(new ZKMetadataStore(mockZooKeeper)).when(pulsar).createLocalMetadataStore(); - doReturn(new ZKMetadataStore(mockZooKeeperGlobal)).when(pulsar).createConfigurationMetadataStore(); + doReturn(createLocalMetadataStore()).when(pulsar).createLocalMetadataStore(); + doReturn(createConfigurationMetadataStore()).when(pulsar).createConfigurationMetadataStore(); - Supplier namespaceServiceSupplier = () -> spy(new NamespaceService(pulsar)); + Supplier namespaceServiceSupplier = + () -> spyWithClassAndConstructorArgs(NamespaceService.class, pulsar); doReturn(namespaceServiceSupplier).when(pulsar).getNamespaceServiceProvider(); doReturn(sameThreadOrderedSafeExecutor).when(pulsar).getOrderedExecutor(); @@ -332,6 +342,14 @@ protected void setupBrokerMocks(PulsarService pulsar) throws Exception { } } + protected MetadataStoreExtended createLocalMetadataStore() throws MetadataStoreException { + return new ZKMetadataStore(mockZooKeeper); + } + + 
protected MetadataStoreExtended createConfigurationMetadataStore() throws MetadataStoreException { + return new ZKMetadataStore(mockZooKeeperGlobal); + } + private void mockConfigBrokerInterceptors(PulsarService pulsarService) { ServiceConfiguration configuration = spy(conf); Set mockBrokerInterceptors = mock(Set.class); @@ -376,7 +394,7 @@ public static MockZooKeeper createMockZooKeeperGlobal() { } public static NonClosableMockBookKeeper createMockBookKeeper(OrderedExecutor executor) throws Exception { - return spy(new NonClosableMockBookKeeper(executor)); + return spyWithClassAndConstructorArgs(NonClosableMockBookKeeper.class, executor); } // Prevent the MockBookKeeper instance from being closed when the broker is restarted within a test @@ -482,5 +500,24 @@ protected static ServiceConfiguration getDefaultConf() { return configuration; } + protected void setupDefaultTenantAndNamespace() throws Exception { + final String tenant = "public"; + final String namespace = tenant + "/default"; + + if (!admin.clusters().getClusters().contains(configClusterName)) { + admin.clusters().createCluster(configClusterName, + ClusterData.builder().serviceUrl(pulsar.getWebServiceAddress()).build()); + } + + if (!admin.tenants().getTenants().contains(tenant)) { + admin.tenants().createTenant(tenant, TenantInfo.builder().allowedClusters( + Sets.newHashSet(configClusterName)).build()); + } + + if (!admin.namespaces().getNamespaces(tenant).contains(namespace)) { + admin.namespaces().createNamespace(namespace); + } + } + private static final Logger log = LoggerFactory.getLogger(MockedPulsarServiceBaseTest.class); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/InMemoryDeliveryTrackerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/InMemoryDeliveryTrackerTest.java index d7b304d8a0c24..11b681d80a640 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/InMemoryDeliveryTrackerTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/delayed/InMemoryDeliveryTrackerTest.java @@ -21,17 +21,20 @@ import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.verifyZeroInteractions; import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; - +import io.netty.util.HashedWheelTimer; import io.netty.util.Timeout; import io.netty.util.Timer; import io.netty.util.TimerTask; - +import io.netty.util.concurrent.DefaultThreadFactory; import java.time.Clock; import java.util.Collections; import java.util.NavigableMap; @@ -39,28 +42,36 @@ import java.util.TreeMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; - import lombok.Cleanup; - import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers; +import org.awaitility.Awaitility; +import org.testng.annotations.AfterClass; import org.testng.annotations.Test; @Test(groups = "broker") public class InMemoryDeliveryTrackerTest { + // Create a single shared timer for the test. 
+ private final Timer timer = new HashedWheelTimer(new DefaultThreadFactory("pulsar-in-memory-delayed-delivery-test"), + 500, TimeUnit.MILLISECONDS); + + @AfterClass(alwaysRun = true) + public void cleanup() { + timer.stop(); + } + @Test public void test() throws Exception { PersistentDispatcherMultipleConsumers dispatcher = mock(PersistentDispatcherMultipleConsumers.class); - Timer timer = mock(Timer.class); - AtomicLong clockTime = new AtomicLong(); Clock clock = mock(Clock.class); when(clock.millis()).then(x -> clockTime.get()); @Cleanup - InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock); + InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock, + false, 0); assertFalse(tracker.hasMessageAvailable()); @@ -131,7 +142,8 @@ public void testWithTimer() throws Exception { }); @Cleanup - InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock); + InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock, + false, 0); assertTrue(tasks.isEmpty()); assertTrue(tracker.addMessage(2, 2, 20)); @@ -160,29 +172,304 @@ public void testWithTimer() throws Exception { /** * Adding a message that is about to expire within the tick time should lead - * to a rejection from the tracker. + * to a rejection from the tracker when isDelayedDeliveryDeliverAtTimeStrict is false. 
*/ @Test public void testAddWithinTickTime() { PersistentDispatcherMultipleConsumers dispatcher = mock(PersistentDispatcherMultipleConsumers.class); - Timer timer = mock(Timer.class); - AtomicLong clockTime = new AtomicLong(); Clock clock = mock(Clock.class); when(clock.millis()).then(x -> clockTime.get()); @Cleanup - InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 100, clock); + InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 100, clock, + false, 0); clockTime.set(0); assertFalse(tracker.addMessage(1, 1, 10)); assertFalse(tracker.addMessage(2, 2, 99)); - assertTrue(tracker.addMessage(3, 3, 100)); - assertTrue(tracker.addMessage(4, 4, 200)); + assertFalse(tracker.addMessage(3, 3, 100)); + assertTrue(tracker.addMessage(4, 4, 101)); + assertTrue(tracker.addMessage(5, 5, 200)); assertEquals(tracker.getNumberOfDelayedMessages(), 2); } + public void testAddMessageWithStrictDelay() { + PersistentDispatcherMultipleConsumers dispatcher = mock(PersistentDispatcherMultipleConsumers.class); + + AtomicLong clockTime = new AtomicLong(); + Clock clock = mock(Clock.class); + when(clock.millis()).then(x -> clockTime.get()); + + @Cleanup + InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 100, clock, + true, 0); + + clockTime.set(10); + + // Verify behavior for the less than, equal to, and greater than deliverAt times. + assertFalse(tracker.addMessage(1, 1, 9)); + assertFalse(tracker.addMessage(4, 4, 10)); + assertTrue(tracker.addMessage(1, 1, 11)); + + assertEquals(tracker.getNumberOfDelayedMessages(), 1); + assertFalse(tracker.hasMessageAvailable()); + } + + /** + * In this test, the deliverAt time is after now, but the deliverAt time is too early to run another tick, so the + * tickTimeMillis determines the delay. 
+ */ + public void testAddMessageWithDeliverAtTimeAfterNowBeforeTickTimeFrequencyWithStrict() throws Exception { + PersistentDispatcherMultipleConsumers dispatcher = mock(PersistentDispatcherMultipleConsumers.class); + + AtomicLong clockTime = new AtomicLong(); + Clock clock = mock(Clock.class); + when(clock.millis()).then(x -> clockTime.get()); + + // Use a short tick time to show that the timer task is run based on the deliverAt time in this scenario. + @Cleanup + InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, + 1000, clock, true, 0); + + // Set clock time, then run tracker to inherit clock time as the last tick time. + clockTime.set(10000); + Timeout timeout = mock(Timeout.class); + when(timeout.isCancelled()).then(x -> false); + tracker.run(timeout); + verify(dispatcher, times(1)).readMoreEntries(); + + // Add a message that has a delivery time just after the previous run. It will get delivered based on the + // tick delay plus the last tick run. + assertTrue(tracker.addMessage(1, 1, 10001)); + + // Wait longer than the tick time plus the HashedWheelTimer's tick time to ensure that enough time has + // passed where it would have been triggered if the tick time was doing the triggering. + Thread.sleep(600); + verify(dispatcher, times(1)).readMoreEntries(); + + // Not wait for the message delivery to get triggered. + Awaitility.await().atMost(10, TimeUnit.SECONDS) + .untilAsserted(() -> verify(dispatcher).readMoreEntries()); + } + + /** + * In this test, the deliverAt time is after now, but before the (tickTimeMillis + now). Because there wasn't a + * recent tick run, the deliverAt time determines the delay. 
+ */ + public void testAddMessageWithDeliverAtTimeAfterNowAfterTickTimeFrequencyWithStrict() { + PersistentDispatcherMultipleConsumers dispatcher = mock(PersistentDispatcherMultipleConsumers.class); + + AtomicLong clockTime = new AtomicLong(); + Clock clock = mock(Clock.class); + when(clock.millis()).then(x -> clockTime.get()); + + // Use a large tick time to show that the message will get delivered earlier because there wasn't + // a previous tick run. + @Cleanup + InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, + 100000, clock, true, 0); + + clockTime.set(500000); + + assertTrue(tracker.addMessage(1, 1, 500005)); + + // Wait long enough for the runnable to run, but not longer than the tick time. The point is that the delivery + // should get scheduled early when the tick duration has passed since the last tick. + Awaitility.await().atMost(10, TimeUnit.SECONDS) + .untilAsserted(() -> verify(dispatcher).readMoreEntries()); + } + + /** + * In this test, the deliverAt time is after now plus tickTimeMillis, so the tickTimeMillis determines the delay. + */ + public void testAddMessageWithDeliverAtTimeAfterFullTickTimeWithStrict() throws Exception { + PersistentDispatcherMultipleConsumers dispatcher = mock(PersistentDispatcherMultipleConsumers.class); + + AtomicLong clockTime = new AtomicLong(); + Clock clock = mock(Clock.class); + when(clock.millis()).then(x -> clockTime.get()); + + // Use a short tick time to show that the timer task is run based on the deliverAt time in this scenario. + @Cleanup + InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, + 500, clock, true, 0); + + clockTime.set(0); + + assertTrue(tracker.addMessage(1, 1, 2000)); + + // Wait longer than the tick time plus the HashedWheelTimer's tick time to ensure that enough time has + // passed where it would have been triggered if the tick time was doing the triggering. 
+ Thread.sleep(1000); + verifyNoInteractions(dispatcher); + + // Not wait for the message delivery to get triggered. + Awaitility.await().atMost(10, TimeUnit.SECONDS) + .untilAsserted(() -> verify(dispatcher).readMoreEntries()); + } + + @Test + public void testWithFixedDelays() throws Exception { + PersistentDispatcherMultipleConsumers dispatcher = mock(PersistentDispatcherMultipleConsumers.class); + + AtomicLong clockTime = new AtomicLong(); + Clock clock = mock(Clock.class); + when(clock.millis()).then(x -> clockTime.get()); + + final long fixedDelayLookahead = 100; + + @Cleanup + InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock, + true, fixedDelayLookahead); + + assertFalse(tracker.hasMessageAvailable()); + + assertTrue(tracker.addMessage(1, 1, 10)); + assertTrue(tracker.addMessage(2, 2, 20)); + assertTrue(tracker.addMessage(3, 3, 30)); + assertTrue(tracker.addMessage(4, 4, 40)); + assertTrue(tracker.addMessage(5, 5, 50)); + + assertFalse(tracker.hasMessageAvailable()); + assertEquals(tracker.getNumberOfDelayedMessages(), 5); + assertFalse(tracker.shouldPauseAllDeliveries()); + + for (int i = 6; i <= fixedDelayLookahead; i++) { + assertTrue(tracker.addMessage(i, i, i * 10)); + } + + assertTrue(tracker.shouldPauseAllDeliveries()); + + clockTime.set(fixedDelayLookahead * 10); + + tracker.getScheduledMessages(100); + assertFalse(tracker.shouldPauseAllDeliveries()); + + // Empty the tracker + int removed = 0; + do { + removed = tracker.getScheduledMessages(100).size(); + } while (removed > 0); + + assertFalse(tracker.shouldPauseAllDeliveries()); + } + + @Test + public void testWithMixedDelays() throws Exception { + PersistentDispatcherMultipleConsumers dispatcher = mock(PersistentDispatcherMultipleConsumers.class); + + AtomicLong clockTime = new AtomicLong(); + Clock clock = mock(Clock.class); + when(clock.millis()).then(x -> clockTime.get()); + + long fixedDelayLookahead = 100; + + @Cleanup + 
InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock, + true, fixedDelayLookahead); + + assertFalse(tracker.hasMessageAvailable()); + + assertTrue(tracker.addMessage(1, 1, 10)); + assertTrue(tracker.addMessage(2, 2, 20)); + assertTrue(tracker.addMessage(3, 3, 30)); + assertTrue(tracker.addMessage(4, 4, 40)); + assertTrue(tracker.addMessage(5, 5, 50)); + + assertFalse(tracker.shouldPauseAllDeliveries()); + + for (int i = 6; i <= fixedDelayLookahead; i++) { + assertTrue(tracker.addMessage(i, i, i * 10)); + } + + assertTrue(tracker.shouldPauseAllDeliveries()); + + // Add message with earlier delivery time + assertTrue(tracker.addMessage(5, 5, 5)); + + assertFalse(tracker.shouldPauseAllDeliveries()); + } + + @Test + public void testWithNoDelays() throws Exception { + PersistentDispatcherMultipleConsumers dispatcher = mock(PersistentDispatcherMultipleConsumers.class); + + AtomicLong clockTime = new AtomicLong(); + Clock clock = mock(Clock.class); + when(clock.millis()).then(x -> clockTime.get()); + + long fixedDelayLookahead = 100; + + @Cleanup + InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock, + true, fixedDelayLookahead); + + assertFalse(tracker.hasMessageAvailable()); + + assertTrue(tracker.addMessage(1, 1, 10)); + assertTrue(tracker.addMessage(2, 2, 20)); + assertTrue(tracker.addMessage(3, 3, 30)); + assertTrue(tracker.addMessage(4, 4, 40)); + assertTrue(tracker.addMessage(5, 5, 50)); + + assertFalse(tracker.shouldPauseAllDeliveries()); + + for (int i = 6; i <= fixedDelayLookahead; i++) { + assertTrue(tracker.addMessage(i, i, i * 10)); + } + + assertTrue(tracker.shouldPauseAllDeliveries()); + + // Add message with no-delay + assertFalse(tracker.addMessage(5, 5, -1L)); + + assertFalse(tracker.shouldPauseAllDeliveries()); + } + + @Test + public void testClose() throws Exception { + Timer timer = new HashedWheelTimer(new 
DefaultThreadFactory("pulsar-in-memory-delayed-delivery-test"), + 1, TimeUnit.MILLISECONDS); + + PersistentDispatcherMultipleConsumers dispatcher = mock(PersistentDispatcherMultipleConsumers.class); + + AtomicLong clockTime = new AtomicLong(); + Clock clock = mock(Clock.class); + when(clock.millis()).then(x -> clockTime.get()); + + final Exception[] exceptions = new Exception[1]; + + InMemoryDelayedDeliveryTracker tracker = new InMemoryDelayedDeliveryTracker(dispatcher, timer, 1, clock, + true, 0) { + @Override + public void run(Timeout timeout) throws Exception { + super.timeout = timer.newTimeout(this, 1, TimeUnit.MILLISECONDS); + if (timeout == null || timeout.isCancelled()) { + return; + } + try { + this.priorityQueue.peekN1(); + } catch (Exception e) { + e.printStackTrace(); + exceptions[0] = e; + } + } + }; + + tracker.addMessage(1, 1, 10); + clockTime.set(10); + + Thread.sleep(300); + + tracker.close(); + + assertNull(exceptions[0]); + + timer.stop(); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/intercept/BrokerInterceptorWithClassLoaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/intercept/BrokerInterceptorWithClassLoaderTest.java index aa4a5bc4562f1..5288ab9954343 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/intercept/BrokerInterceptorWithClassLoaderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/intercept/BrokerInterceptorWithClassLoaderTest.java @@ -18,14 +18,30 @@ */ package org.apache.pulsar.broker.intercept; -import org.apache.pulsar.broker.PulsarService; -import org.apache.pulsar.common.nar.NarClassLoader; -import org.testng.annotations.Test; - import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.testng.Assert.assertEquals; +import com.google.common.collect.Maps; +import io.netty.buffer.ByteBuf; +import 
org.apache.bookkeeper.mledger.Entry; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.service.Consumer; +import org.apache.pulsar.broker.service.Producer; +import org.apache.pulsar.broker.service.ServerCnx; +import org.apache.pulsar.broker.service.Subscription; +import org.apache.pulsar.broker.service.Topic; +import org.apache.pulsar.common.api.proto.BaseCommand; +import org.apache.pulsar.common.api.proto.CommandAck; +import org.apache.pulsar.common.api.proto.MessageMetadata; +import org.apache.pulsar.common.intercept.InterceptException; +import org.apache.pulsar.common.nar.NarClassLoader; +import org.testng.annotations.Test; +import javax.servlet.FilterChain; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import java.util.Map; /** * Unit test {@link BrokerInterceptorWithClassLoader}. @@ -33,6 +49,7 @@ @Test(groups = "broker") public class BrokerInterceptorWithClassLoaderTest { + @Test public void testWrapper() throws Exception { BrokerInterceptor h = mock(BrokerInterceptor.class); NarClassLoader loader = mock(NarClassLoader.class); @@ -43,4 +60,77 @@ public void testWrapper() throws Exception { verify(h, times(1)).initialize(same(pulsarService)); } + + @Test + public void testClassLoaderSwitcher() throws Exception { + NarClassLoader narLoader = mock(NarClassLoader.class); + BrokerInterceptor interceptor = new BrokerInterceptor() { + @Override + public void beforeSendMessage(Subscription subscription, Entry entry, long[] ackSet, MessageMetadata msgMetadata) { + assertEquals(Thread.currentThread().getContextClassLoader(), narLoader); + } + @Override + public void onPulsarCommand(BaseCommand command, ServerCnx cnx) throws InterceptException { + assertEquals(Thread.currentThread().getContextClassLoader(), narLoader); + } + @Override + public void onConnectionClosed(ServerCnx cnx) { + assertEquals(Thread.currentThread().getContextClassLoader(), narLoader); + } + @Override + public void 
onWebserviceRequest(ServletRequest request) { + assertEquals(Thread.currentThread().getContextClassLoader(), narLoader); + } + @Override + public void onWebserviceResponse(ServletRequest request, ServletResponse response) { + assertEquals(Thread.currentThread().getContextClassLoader(), narLoader); + } + @Override + public void onFilter(ServletRequest request, ServletResponse response, FilterChain chain) { + assertEquals(Thread.currentThread().getContextClassLoader(), narLoader); + } + @Override + public void initialize(PulsarService pulsarService) throws Exception { + assertEquals(Thread.currentThread().getContextClassLoader(), narLoader); + } + @Override + public void close() { + assertEquals(Thread.currentThread().getContextClassLoader(), narLoader); + } + }; + + BrokerInterceptorWithClassLoader brokerInterceptorWithClassLoader = + new BrokerInterceptorWithClassLoader(interceptor, narLoader); + ClassLoader curClassLoader = Thread.currentThread().getContextClassLoader(); + // test class loader + assertEquals(brokerInterceptorWithClassLoader.getClassLoader(), narLoader); + // test initialize + brokerInterceptorWithClassLoader.initialize(mock(PulsarService.class)); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + // test onFilter + brokerInterceptorWithClassLoader.onFilter(mock(ServletRequest.class) + , mock(ServletResponse.class), mock(FilterChain.class)); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + // test onWebserviceResponse + brokerInterceptorWithClassLoader.onWebserviceResponse(mock(ServletRequest.class) + , mock(ServletResponse.class)); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + // test onWebserviceRequest + brokerInterceptorWithClassLoader.onWebserviceRequest(mock(ServletRequest.class)); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + // test onConnectionClosed + 
brokerInterceptorWithClassLoader.onConnectionClosed(mock(ServerCnx.class)); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + // test onPulsarCommand + brokerInterceptorWithClassLoader.onPulsarCommand(null, mock(ServerCnx.class)); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + // test beforeSendMessage + brokerInterceptorWithClassLoader + .beforeSendMessage(mock(Subscription.class), mock(Entry.class), null, null); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + // test close + brokerInterceptorWithClassLoader.close(); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/intercept/CounterBrokerInterceptor.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/intercept/CounterBrokerInterceptor.java index dc51c3dc5154e..1462cfab89505 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/intercept/CounterBrokerInterceptor.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/intercept/CounterBrokerInterceptor.java @@ -56,14 +56,14 @@ public void beforeSendMessage(Subscription subscription, Entry entry, long[] ackSet, MessageMetadata msgMetadata) { - log.info("Send message to topic {}, subscription {}", + log.debug("Send message to topic {}, subscription {}", subscription.getTopic(), subscription.getName()); beforeSendCount++; } @Override public void onPulsarCommand(BaseCommand command, ServerCnx cnx) { - log.info("[{}] On [{}] Pulsar command", count, command.getType().name()); + log.debug("[{}] On [{}] Pulsar command", count, command.getType().name()); count ++; } @@ -75,13 +75,13 @@ public void onConnectionClosed(ServerCnx cnx) { @Override public void onWebserviceRequest(ServletRequest request) { count ++; - log.info("[{}] On [{}] Webservice request", count, ((HttpServletRequest)request).getRequestURL().toString()); + log.debug("[{}] On [{}] 
Webservice request", count, ((HttpServletRequest)request).getRequestURL().toString()); } @Override public void onWebserviceResponse(ServletRequest request, ServletResponse response) { count ++; - log.info("[{}] On [{}] Webservice response {}", count, ((HttpServletRequest)request).getRequestURL().toString(), response); + log.debug("[{}] On [{}] Webservice response {}", count, ((HttpServletRequest)request).getRequestURL().toString(), response); if (response instanceof Response) { Response res = (Response) response; responseList.add(new ResponseEvent(res.getHttpChannel().getRequest().getRequestURI(), res.getStatus())); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersTest.java new file mode 100644 index 0000000000000..489efa5755ba8 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AdvertisedListenersTest.java @@ -0,0 +1,123 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.loadbalance; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; + +import java.net.URI; +import java.util.Optional; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.util.PortManager; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.util.EntityUtils; +import org.apache.pulsar.broker.MultiBrokerBaseTest; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.common.lookup.data.LookupData; +import org.apache.pulsar.common.policies.data.TopicStats; +import org.apache.pulsar.common.util.ObjectMapperFactory; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class AdvertisedListenersTest extends MultiBrokerBaseTest { + @Override + protected int numberOfAdditionalBrokers() { + return 1; + } + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + + updateConfig(conf, "BROKER-X"); + } + + @Override + protected ServiceConfiguration createConfForAdditionalBroker(int additionalBrokerIndex) { + ServiceConfiguration conf = super.createConfForAdditionalBroker(additionalBrokerIndex); + updateConfig(conf, "BROKER-" + additionalBrokerIndex); + return conf; + } + + private void updateConfig(ServiceConfiguration conf, String advertisedAddress) { + int pulsarPort = PortManager.nextFreePort(); + int httpPort = PortManager.nextFreePort(); + int httpsPort = PortManager.nextFreePort(); + + // Use invalid domain name as identifier and instead make sure the advertised listeners work 
as intended + conf.setAdvertisedAddress(advertisedAddress); + conf.setAdvertisedListeners( + "public:pulsar://localhost:" + pulsarPort + + ",public_http:http://localhost:" + httpPort + + ",public_https:https://localhost:" + httpsPort); + conf.setBrokerServicePort(Optional.of(pulsarPort)); + conf.setWebServicePort(Optional.of(httpPort)); + conf.setWebServicePortTls(Optional.of(httpsPort)); + } + + @Test + public void testLookup() throws Exception { + HttpGet request = + new HttpGet(pulsar.getWebServiceAddress() + "/lookup/v2/topic/persistent/public/default/my-topic"); + request.addHeader(HttpHeaders.CONTENT_TYPE, "application/json"); + request.addHeader(HttpHeaders.ACCEPT, "application/json"); + final String topic = "my-topic"; + + @Cleanup + CloseableHttpClient httpClient = HttpClients.createDefault(); + + @Cleanup + CloseableHttpResponse response = httpClient.execute(request); + + HttpEntity entity = response.getEntity(); + LookupData ld = ObjectMapperFactory.getThreadLocal().readValue(EntityUtils.toString(entity), LookupData.class); + System.err.println("Lookup data: " + ld); + + assertEquals(new URI(ld.getBrokerUrl()).getHost(), "localhost"); + assertEquals(new URI(ld.getHttpUrl()).getHost(), "localhost"); + assertEquals(new URI(ld.getHttpUrlTls()).getHost(), "localhost"); + + + // Produce data + @Cleanup + Producer p = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .create(); + + p.send("hello"); + + // Verify we can get the correct HTTP redirect to the advertised listener + for (PulsarAdmin a : getAllAdmins()) { + TopicStats s = a.topics().getStats(topic); + assertNotNull(a.lookups().lookupTopic(topic)); + assertEquals(s.getPublishers().size(), 1); + } + } + +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java index 1429c7376f4ae..9e81a3e1db968 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java @@ -234,7 +234,8 @@ public void testAntiAffinityNamespaceFilteringWithDomain() throws Exception { brokerToDomainMap.put("brokerName-3", "domain-1"); Set candidate = Sets.newHashSet(); - ConcurrentOpenHashMap>> brokerToNamespaceToBundleRange = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap>> brokerToNamespaceToBundleRange = + ConcurrentOpenHashMap.>>newBuilder().build(); assertEquals(brokers.size(), totalBrokers); @@ -320,7 +321,8 @@ public void testAntiAffinityNamespaceFilteringWithoutDomain() throws Exception { Set brokers = Sets.newHashSet(); Set candidate = Sets.newHashSet(); - ConcurrentOpenHashMap>> brokerToNamespaceToBundleRange = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap>> brokerToNamespaceToBundleRange = + ConcurrentOpenHashMap.>>newBuilder().build(); brokers.add("broker-0"); brokers.add("broker-1"); brokers.add("broker-2"); @@ -367,9 +369,11 @@ public void testAntiAffinityNamespaceFilteringWithoutDomain() throws Exception { private void selectBrokerForNamespace( ConcurrentOpenHashMap>> brokerToNamespaceToBundleRange, String broker, String namespace, String assignedBundleName) { - ConcurrentOpenHashSet bundleSet = new ConcurrentOpenHashSet<>(); + ConcurrentOpenHashSet bundleSet = + ConcurrentOpenHashSet.newBuilder().build(); bundleSet.add(assignedBundleName); - ConcurrentOpenHashMap> nsToBundleMap = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap> nsToBundleMap = + ConcurrentOpenHashMap.>newBuilder().build(); nsToBundleMap.put(namespace, bundleSet); brokerToNamespaceToBundleRange.put(broker, nsToBundleMap); } @@ -469,7 +473,8 @@ public void testLoadSheddingUtilWithAntiAffinityNamespace() throws Exception { Set brokers = Sets.newHashSet(); Set candidate = Sets.newHashSet(); - ConcurrentOpenHashMap>> 
brokerToNamespaceToBundleRange = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap>> brokerToNamespaceToBundleRange = + ConcurrentOpenHashMap.>>newBuilder().build(); brokers.add("broker-0"); brokers.add("broker-1"); brokers.add("broker-2"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LeaderElectionServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LeaderElectionServiceTest.java index d252b1cf326f4..ec19480ae06fa 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LeaderElectionServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LeaderElectionServiceTest.java @@ -44,6 +44,8 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; + @Slf4j @Test(groups = "broker") public class LeaderElectionServiceTest { @@ -75,7 +77,7 @@ public void anErrorShouldBeThrowBeforeLeaderElected() throws PulsarServerExcepti config.setAdvertisedAddress("localhost"); config.setZookeeperServers("127.0.0.1" + ":" + bkEnsemble.getZookeeperPort()); @Cleanup - PulsarService pulsar = Mockito.spy(new MockPulsarService(config)); + PulsarService pulsar = spyWithClassAndConstructorArgs(MockPulsarService.class, config); pulsar.start(); // mock pulsar.getLeaderElectionService() in a thread safe way @@ -135,7 +137,7 @@ private void checkLookupException(String tenant, String namespace, PulsarClient } } - private static class MockPulsarService extends PulsarService { + public static class MockPulsarService extends PulsarService { public MockPulsarService(ServiceConfiguration config) { super(config); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadReportNetworkLimitTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadReportNetworkLimitTest.java index 460210d33d6bf..2d9dc1abb0efc 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadReportNetworkLimitTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/LoadReportNetworkLimitTest.java @@ -56,8 +56,8 @@ public void checkLoadReportNicSpeed() throws Exception { LoadManagerReport report = admin.brokerStats().getLoadReport(); if (SystemUtils.IS_OS_LINUX) { - assertEquals(report.getBandwidthIn().limit, nicCount * 5.4 * 1024 * 1024); - assertEquals(report.getBandwidthOut().limit, nicCount * 5.4 * 1024 * 1024); + assertEquals(report.getBandwidthIn().limit, nicCount * 5.4 * 1000 * 1000); + assertEquals(report.getBandwidthOut().limit, nicCount * 5.4 * 1000 * 1000); } else { // On non-Linux system we don't report the network usage assertEquals(report.getBandwidthIn().limit, -1.0); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/MultiBrokerLeaderElectionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/MultiBrokerLeaderElectionTest.java new file mode 100644 index 0000000000000..462b640c17511 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/MultiBrokerLeaderElectionTest.java @@ -0,0 +1,168 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.loadbalance; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.Phaser; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.MultiBrokerBaseTest; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.metadata.TestZKServer; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.MetadataStoreException; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.awaitility.Awaitility; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class MultiBrokerLeaderElectionTest extends MultiBrokerBaseTest { + @Override + protected int numberOfAdditionalBrokers() { + return 9; + } + + TestZKServer testZKServer; + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + testZKServer = new TestZKServer(); + } + + @Override + protected void onCleanup() { + super.onCleanup(); + if (testZKServer != null) { + try { + testZKServer.close(); + } catch (Exception e) { + log.error("Error in stopping ZK server", e); + } + } + } + + @Override + protected MetadataStoreExtended createLocalMetadataStore() throws MetadataStoreException { + return 
MetadataStoreExtended.create(testZKServer.getConnectionString(), MetadataStoreConfig.builder().build()); + } + + @Override + protected MetadataStoreExtended createConfigurationMetadataStore() throws MetadataStoreException { + return MetadataStoreExtended.create(testZKServer.getConnectionString(), MetadataStoreConfig.builder().build()); + } + + @Test + public void shouldElectOneLeader() { + int leaders = 0; + for (PulsarService broker : getAllBrokers()) { + if (broker.getLeaderElectionService().isLeader()) { + leaders++; + } + } + assertEquals(leaders, 1); + } + + @Test + public void shouldAllBrokersKnowTheLeader() { + Awaitility.await().untilAsserted(() -> { + for (PulsarService broker : getAllBrokers()) { + Optional currentLeader = broker.getLeaderElectionService().getCurrentLeader(); + assertTrue(currentLeader.isPresent(), "Leader wasn't known on broker " + broker.getBrokerServiceUrl()); + } + }); + } + + @Test + public void shouldAllBrokersBeAbleToGetTheLeader() { + Awaitility.await().untilAsserted(() -> { + LeaderBroker leader = null; + for (PulsarService broker : getAllBrokers()) { + Optional currentLeader = + broker.getLeaderElectionService().readCurrentLeader().get(1, TimeUnit.SECONDS); + assertTrue(currentLeader.isPresent(), "Leader wasn't known on broker " + broker.getBrokerServiceUrl()); + if (leader != null) { + assertEquals(currentLeader.get(), leader, + "Different leader on broker " + broker.getBrokerServiceUrl()); + } else { + leader = currentLeader.get(); + } + } + }); + } + + @Test + public void shouldProvideConsistentAnswerToTopicLookups() + throws PulsarAdminException, ExecutionException, InterruptedException { + String topicNameBase = "persistent://public/default/lookuptest" + UUID.randomUUID() + "-"; + List topicNames = IntStream.range(0, 500).mapToObj(i -> topicNameBase + i) + .collect(Collectors.toList()); + List allAdmins = getAllAdmins(); + @Cleanup("shutdown") + ExecutorService executorService = 
Executors.newFixedThreadPool(allAdmins.size()); + List>> resultFutures = new ArrayList<>(); + String leaderBrokerUrl = admin.brokers().getLeaderBroker().getServiceUrl(); + log.info("LEADER is {}", leaderBrokerUrl); + // use Phaser to increase the chances of a race condition by triggering all threads once + // they are waiting just before the lookupTopic call + final Phaser phaser = new Phaser(1); + for (PulsarAdmin brokerAdmin : allAdmins) { + if (!leaderBrokerUrl.equals(brokerAdmin.getServiceUrl())) { + phaser.register(); + log.info("Doing lookup to broker {}", brokerAdmin.getServiceUrl()); + resultFutures.add(executorService.submit(() -> { + phaser.arriveAndAwaitAdvance(); + return topicNames.stream().map(topicName -> { + try { + return brokerAdmin.lookups().lookupTopic(topicName); + } catch (PulsarAdminException e) { + log.error("Error looking up topic {} in {}", topicName, brokerAdmin.getServiceUrl()); + throw new RuntimeException(e); + } + }).collect(Collectors.toList()); + })); + } + } + phaser.arriveAndAwaitAdvance(); + List firstResult = null; + for (Future> resultFuture : resultFutures) { + List result = resultFuture.get(); + if (firstResult == null) { + firstResult = result; + } else { + assertEquals(result, firstResult, "The lookup results weren't consistent."); + } + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java index 059191b787636..d08699998e7b9 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/SimpleLoadManagerImplTest.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.loadbalance; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.Mockito.atLeastOnce; import static 
org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -108,7 +109,7 @@ void setup() throws Exception { bkEnsemble.start(); // Start broker 1 - ServiceConfiguration config1 = spy(new ServiceConfiguration()); + ServiceConfiguration config1 = spy(ServiceConfiguration.class); config1.setClusterName("use"); config1.setWebServicePort(Optional.of(0)); config1.setZookeeperServers("127.0.0.1" + ":" + bkEnsemble.getZookeeperPort()); @@ -336,7 +337,7 @@ public void testLoadReportParsing() throws Exception { @Test(enabled = true) public void testDoLoadShedding() throws Exception { - SimpleLoadManagerImpl loadManager = spy(new SimpleLoadManagerImpl(pulsar1)); + SimpleLoadManagerImpl loadManager = spyWithClassAndConstructorArgs(SimpleLoadManagerImpl.class, pulsar1); PulsarResourceDescription rd = new PulsarResourceDescription(); rd.put("memory", new ResourceUsage(1024, 4096)); rd.put("cpu", new ResourceUsage(10, 100)); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/BundleSplitterTaskTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/BundleSplitterTaskTest.java index 7480989bbb586..9ff266ba96ce0 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/BundleSplitterTaskTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/BundleSplitterTaskTest.java @@ -25,6 +25,7 @@ import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.TimeAverageMessageData; import org.apache.pulsar.broker.loadbalance.LoadData; +import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; import org.apache.pulsar.policies.data.loadbalancer.NamespaceBundleStats; import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; @@ -94,6 +95,57 @@ public void testSplitTaskWhenTopicJustOne() { Assert.assertEquals(bundlesToSplit.size(), 0); } + @Test + public void 
testLoadBalancerNamespaceMaximumBundles() throws Exception { + pulsar.getConfiguration().setLoadBalancerNamespaceMaximumBundles(3); + + final BundleSplitterTask bundleSplitterTask = new BundleSplitterTask(); + LoadData loadData = new LoadData(); + + LocalBrokerData brokerData = new LocalBrokerData(); + Map lastStats = new HashMap<>(); + final NamespaceBundleStats namespaceBundleStats = new NamespaceBundleStats(); + namespaceBundleStats.topics = 5; + lastStats.put("ten/ns/0x00000000_0x20000000", namespaceBundleStats); + + final NamespaceBundleStats namespaceBundleStats2 = new NamespaceBundleStats(); + namespaceBundleStats2.topics = 5; + lastStats.put("ten/ns/0x20000000_0x40000000", namespaceBundleStats2); + + final NamespaceBundleStats namespaceBundleStats3 = new NamespaceBundleStats(); + namespaceBundleStats3.topics = 5; + lastStats.put("ten/ns/0x40000000_0x60000000", namespaceBundleStats3); + + brokerData.setLastStats(lastStats); + loadData.getBrokerData().put("broker", new BrokerData(brokerData)); + + BundleData bundleData1 = new BundleData(); + TimeAverageMessageData averageMessageData1 = new TimeAverageMessageData(); + averageMessageData1.setMsgRateIn(pulsar.getConfiguration().getLoadBalancerNamespaceBundleMaxMsgRate() * 2); + averageMessageData1.setMsgRateOut(1); + bundleData1.setLongTermData(averageMessageData1); + loadData.getBundleData().put("ten/ns/0x00000000_0x20000000", bundleData1); + + BundleData bundleData2 = new BundleData(); + TimeAverageMessageData averageMessageData2 = new TimeAverageMessageData(); + averageMessageData2.setMsgRateIn(pulsar.getConfiguration().getLoadBalancerNamespaceBundleMaxMsgRate() * 2); + averageMessageData2.setMsgRateOut(1); + bundleData2.setLongTermData(averageMessageData2); + loadData.getBundleData().put("ten/ns/0x20000000_0x40000000", bundleData2); + + BundleData bundleData3 = new BundleData(); + TimeAverageMessageData averageMessageData3 = new TimeAverageMessageData(); + 
averageMessageData3.setMsgRateIn(pulsar.getConfiguration().getLoadBalancerNamespaceBundleMaxMsgRate() * 2); + averageMessageData3.setMsgRateOut(1); + bundleData3.setLongTermData(averageMessageData3); + loadData.getBundleData().put("ten/ns/0x40000000_0x60000000", bundleData3); + + int currentBundleCount = pulsar.getNamespaceService().getBundleCount(NamespaceName.get("ten/ns")); + final Set bundlesToSplit = bundleSplitterTask.findBundlesToSplit(loadData, pulsar); + Assert.assertEquals(bundlesToSplit.size() + currentBundleCount, + pulsar.getConfiguration().getLoadBalancerNamespaceMaximumBundles()); + } + @AfterMethod(alwaysRun = true) void shutdown() throws Exception { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerSharedTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerSharedTest.java index 716b9716425aa..d23772185f1e7 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerSharedTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerSharedTest.java @@ -36,7 +36,10 @@ public void testRemoveMostServicingBrokersForNamespace() { String assignedBundle = namespace + "/0x00000000_0x40000000"; Set candidates = Sets.newHashSet(); - ConcurrentOpenHashMap>> map = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap>> map = + ConcurrentOpenHashMap.>>newBuilder() + .build(); LoadManagerShared.removeMostServicingBrokersForNamespace(assignedBundle, candidates, map); Assert.assertEquals(candidates.size(), 0); @@ -80,8 +83,12 @@ public void testRemoveMostServicingBrokersForNamespace() { private static void fillBrokerToNamespaceToBundleMap( ConcurrentOpenHashMap>> map, String broker, String namespace, String bundle) { - map.computeIfAbsent(broker, k -> new ConcurrentOpenHashMap<>()) - .computeIfAbsent(namespace, k -> new ConcurrentOpenHashSet<>()).add(bundle); + map.computeIfAbsent(broker, + k -> 
ConcurrentOpenHashMap.>newBuilder().build()) + .computeIfAbsent(namespace, + k -> ConcurrentOpenHashSet.newBuilder().build()) + .add(bundle); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/lookup/http/HttpTopicLookupv2Test.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/lookup/http/HttpTopicLookupv2Test.java index b65b084ccdc35..47db73d39f799 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/lookup/http/HttpTopicLookupv2Test.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/lookup/http/HttpTopicLookupv2Test.java @@ -73,7 +73,7 @@ public void setUp() throws Exception { pulsar = mock(PulsarService.class); ns = mock(NamespaceService.class); auth = mock(AuthorizationService.class); - config = spy(new ServiceConfiguration()); + config = spy(ServiceConfiguration.class); config.setClusterName("use"); clusters = new TreeSet<>(); clusters.add("use"); @@ -102,7 +102,7 @@ public void setUp() throws Exception { @Test public void crossColoLookup() throws Exception { - TopicLookup destLookup = spy(new TopicLookup()); + TopicLookup destLookup = spy(TopicLookup.class); doReturn(false).when(destLookup).isRequestHttps(); destLookup.setPulsar(pulsar); doReturn("null").when(destLookup).clientAppId(); @@ -132,7 +132,7 @@ public void testNotEnoughLookupPermits() throws Exception { BrokerService brokerService = pulsar.getBrokerService(); doReturn(new Semaphore(0)).when(brokerService).getLookupRequestSemaphore(); - TopicLookup destLookup = spy(new TopicLookup()); + TopicLookup destLookup = spy(TopicLookup.class); doReturn(false).when(destLookup).isRequestHttps(); destLookup.setPulsar(pulsar); doReturn("null").when(destLookup).clientAppId(); @@ -170,7 +170,7 @@ public void testValidateReplicationSettingsOnNamespace() throws Exception { // doReturn(Optional.of(policies2)).when(policiesCache) // .get(AdminResource.path(POLICIES, property, cluster, ns2)); - TopicLookup destLookup = spy(new TopicLookup()); + TopicLookup 
destLookup = spy(TopicLookup.class); doReturn(false).when(destLookup).isRequestHttps(); destLookup.setPulsar(pulsar); doReturn("null").when(destLookup).clientAppId(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/lookup/http/v2/TopicLookupTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/lookup/http/v2/TopicLookupTest.java index 317b320a9a4d4..21e7abded4789 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/lookup/http/v2/TopicLookupTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/lookup/http/v2/TopicLookupTest.java @@ -46,7 +46,7 @@ public class TopicLookupTest extends PulsarWebResourceTest { @Override protected ResourceConfig configure() { - resource = spy(new TestableTopicLookup()); + resource = spy(TestableTopicLookup.class); return new ResourceConfig().register(resource); } @@ -70,7 +70,7 @@ public void testListenerName() { assertEquals(resource.actualListenerName, "query"); } - private static class TestableTopicLookup extends TopicLookup { + public static class TestableTopicLookup extends TopicLookup { private String actualListenerName; @Override diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java index 8d35cd316b876..1420bc6de9cda 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/NamespaceServiceTest.java @@ -66,7 +66,6 @@ import org.apache.pulsar.common.policies.data.BundlesData; import org.apache.pulsar.common.policies.data.LocalPolicies; import org.apache.pulsar.common.policies.data.Policies; -import org.apache.pulsar.common.policies.data.impl.BundlesDataImpl.BundlesDataImplBuilder; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import 
org.apache.pulsar.metadata.api.GetResult; @@ -74,6 +73,7 @@ import org.apache.pulsar.policies.data.loadbalancer.AdvertisedListener; import org.apache.pulsar.policies.data.loadbalancer.LoadReport; import org.apache.pulsar.policies.data.loadbalancer.LocalBrokerData; +import org.awaitility.Awaitility; import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -492,6 +492,48 @@ public void testRemoveOwnershipAndSplitBundle() throws Exception { } } + + @Test + public void testSplitBundleAndRemoveOldBundleFromOwnerShipCache() throws Exception { + OwnershipCache ownershipCache = spy(pulsar.getNamespaceService().getOwnershipCache()); + doReturn(CompletableFuture.completedFuture(null)).when(ownershipCache).disableOwnership(any(NamespaceBundle.class)); + + Field ownership = NamespaceService.class.getDeclaredField("ownershipCache"); + ownership.setAccessible(true); + ownership.set(pulsar.getNamespaceService(), ownershipCache); + + NamespaceService namespaceService = pulsar.getNamespaceService(); + NamespaceName nsname = NamespaceName.get("pulsar/global/ns1"); + TopicName topicName = TopicName.get("persistent://pulsar/global/ns1/topic-1"); + NamespaceBundles bundles = namespaceService.getNamespaceBundleFactory().getBundles(nsname); + + NamespaceBundle splitBundle1 = bundles.findBundle(topicName); + ownershipCache.tryAcquiringOwnership(splitBundle1); + CompletableFuture result1 = namespaceService.splitAndOwnBundle(splitBundle1, false, NamespaceBundleSplitAlgorithm.RANGE_EQUALLY_DIVIDE_ALGO); + try { + result1.get(); + } catch (Exception e) { + fail("split bundle failed", e); + } + Awaitility.await().untilAsserted(() + -> assertNull(namespaceService.getOwnershipCache().getOwnedBundles().get(splitBundle1))); + + //unload split + bundles = namespaceService.getNamespaceBundleFactory().getBundles(nsname); + assertNotNull(bundles); + NamespaceBundle splitBundle2 = bundles.findBundle(topicName); + CompletableFuture result2 = 
namespaceService.splitAndOwnBundle(splitBundle2, true, NamespaceBundleSplitAlgorithm.RANGE_EQUALLY_DIVIDE_ALGO); + try { + result2.get(); + } catch (Exception e) { + // make sure: NPE does not occur + fail("split bundle failed", e); + } + Awaitility.await().untilAsserted(() + -> assertNull(namespaceService.getOwnershipCache().getOwnedBundles().get(splitBundle2))); + } + + @Test public void testSplitLargestBundle() throws Exception { String namespace = "prop/test/ns-abc2"; @@ -539,6 +581,14 @@ public void testSplitLargestBundle() throws Exception { } } + @Test + public void testHeartbeatNamespaceMatch() throws Exception { + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), conf); + NamespaceBundle namespaceBundle = pulsar.getNamespaceService().getNamespaceBundleFactory().getFullBundle(namespaceName); + assertTrue(NamespaceService.isSystemServiceNamespace( + NamespaceBundle.getBundleNamespace(namespaceBundle.toString()))); + } + @SuppressWarnings("unchecked") private Pair> splitBundles(NamespaceBundleFactory utilityFactory, NamespaceName nsname, NamespaceBundles bundles, NamespaceBundle targetBundle) throws Exception { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipForCurrentServerTestBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipForCurrentServerTestBase.java index 9a8021db28c79..a6604d9639736 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipForCurrentServerTestBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnerShipForCurrentServerTestBase.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.namespace; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; @@ -118,7 +119,7 @@ protected void startBroker() throws 
Exception { conf.setWebServicePortTls(Optional.of(0)); serviceConfigurationList.add(conf); - PulsarService pulsar = spy(new PulsarService(conf)); + PulsarService pulsar = spyWithClassAndConstructorArgs(PulsarService.class, conf); setupBrokerMocks(pulsar); pulsar.start(); @@ -133,7 +134,8 @@ protected void setupBrokerMocks(PulsarService pulsar) throws Exception { MockZooKeeperSession mockZooKeeperSession = MockZooKeeperSession.newInstance(mockZooKeeper); doReturn(new ZKMetadataStore(mockZooKeeperSession)).when(pulsar).createLocalMetadataStore(); doReturn(new ZKMetadataStore(mockZooKeeperSession)).when(pulsar).createConfigurationMetadataStore(); - Supplier namespaceServiceSupplier = () -> spy(new NamespaceService(pulsar)); + Supplier namespaceServiceSupplier = + () -> spyWithClassAndConstructorArgs(NamespaceService.class, pulsar); doReturn(namespaceServiceSupplier).when(pulsar).getNamespaceServiceProvider(); SameThreadOrderedSafeExecutor executor = new SameThreadOrderedSafeExecutor(); @@ -157,7 +159,7 @@ public static MockZooKeeper createMockZooKeeper() throws Exception { } public static NonClosableMockBookKeeper createMockBookKeeper(OrderedExecutor executor) throws Exception { - return spy(new NonClosableMockBookKeeper(executor)); + return spyWithClassAndConstructorArgs(NonClosableMockBookKeeper.class, executor); } // Prevent the MockBookKeeper instance from being closed when the broker is restarted within a test diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnershipCacheTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnershipCacheTest.java index 143d5ef78f510..dde25fa2eed29 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnershipCacheTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/namespace/OwnershipCacheTest.java @@ -401,7 +401,7 @@ public void testReestablishOwnership() throws Exception { assertFalse(data3.isDisabled()); 
assertNotNull(cache.getOwnedBundle(testFullBundle)); - assertTrue(cache.checkOwnership(testFullBundle)); + assertTrue(cache.checkOwnershipAsync(testFullBundle).get()); assertEquals(data2.getNativeUrl(), selfBrokerUrl); assertFalse(data2.isDisabled()); assertNotNull(cache.getOwnedBundle(testFullBundle)); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/protocol/ProtocolHandlerWithClassLoaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/protocol/ProtocolHandlerWithClassLoaderTest.java index 42d26d53f27c6..75d8c646b6bc8 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/protocol/ProtocolHandlerWithClassLoaderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/protocol/ProtocolHandlerWithClassLoaderTest.java @@ -74,6 +74,7 @@ public void testWrapper() throws Exception { verify(h, times(1)).getProtocolDataToAdvertise(); } + @Test public void testClassLoaderSwitcher() throws Exception { NarClassLoader loader = mock(NarClassLoader.class); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/RGUsageMTAggrWaitForAllMesgsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/RGUsageMTAggrWaitForAllMsgsTest.java similarity index 98% rename from pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/RGUsageMTAggrWaitForAllMesgsTest.java rename to pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/RGUsageMTAggrWaitForAllMsgsTest.java index 91260098a3c31..ce3f033a3b0f1 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/RGUsageMTAggrWaitForAllMesgsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/RGUsageMTAggrWaitForAllMsgsTest.java @@ -20,7 +20,6 @@ import com.google.common.collect.Sets; import io.prometheus.client.Summary; -import java.util.Collections; import org.apache.pulsar.broker.resourcegroup.ResourceGroup.BytesAndMessagesCount; import 
org.apache.pulsar.broker.resourcegroup.ResourceGroup.ResourceGroupMonitoringClass; import org.apache.pulsar.broker.resourcegroup.ResourceGroupService.ResourceGroupUsageStatsType; @@ -59,7 +58,7 @@ // The tenants and namespaces in those topics are associated with a set of resource-groups (RGs). // After sending/receiving all the messages, traffic usage statistics, and Prometheus-metrics // are verified on the RGs. -public class RGUsageMTAggrWaitForAllMesgsTest extends ProducerConsumerBase { +public class RGUsageMTAggrWaitForAllMsgsTest extends ProducerConsumerBase { @BeforeClass @Override protected void setup() throws Exception { @@ -350,13 +349,12 @@ private boolean tenantRGEqualsNamespaceRG(String[] topicStrings) throws PulsarCl } } if ((numEqualRGs + numUnEqualRGs != numTopics) || (numEqualRGs > 0 && numUnEqualRGs > 0)) { - String errMesg = String.format("Found {} topics with equal RGs and {} with unequal, on {} topics", + String errMesg = String.format("Found %s topics with equal RGs and %s with unequal, on %s topics", numEqualRGs, numUnEqualRGs, numTopics); throw new PulsarClientException(errMesg); - } else if (numEqualRGs == numTopics) { - return true; + } else { + return numEqualRGs == numTopics; } - return false; } private void registerTenantsAndNamespaces(String[] topicStrings) throws Exception { @@ -558,7 +556,7 @@ private void verfyRGProdConsStats(String[] topicStrings, log.debug("verfyProdConsStats: topicStatsMap has {} entries", topicStatsMap.size()); - // Pulsar runtime adds some additional bytes in the exchanges: a 45-byte per-message + // Pulsar runtime adds some additional bytes in the exchanges: a 42-byte per-message // metadata of some kind, plus more as the number of messages increases. // Hence the ">=" assertion with ExpectedNumBytesSent/Received in the following checks. 
final int ExpectedNumBytesSent = sentNumBytes + PER_MESSAGE_METADATA_OHEAD * sentNumMsgs; @@ -788,10 +786,9 @@ private void verifyRGMetrics(String[] topicStrings, Assert.assertNotEquals(ninetethPercentileValue, 0); } - private static final Logger log = LoggerFactory.getLogger(RGUsageMTAggrWaitForAllMesgsTest.class); - - // Empirically, there appears to be a 45-byte overhead for metadata, imposed by Pulsar runtime. - private static final int PER_MESSAGE_METADATA_OHEAD = 45; + private static final Logger log = LoggerFactory.getLogger(RGUsageMTAggrWaitForAllMsgsTest.class); + // Empirically, there appears to be a 42-byte overhead for metadata, imposed by Pulsar runtime. + private static final int PER_MESSAGE_METADATA_OHEAD = 42; private static final int PUBLISH_INTERVAL_SECS = 10; private static final int NUM_PRODUCERS = 4; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupUsageAggregationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupUsageAggregationTest.java index a89d759e7ab29..08c1f6163f253 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupUsageAggregationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceGroupUsageAggregationTest.java @@ -130,8 +130,8 @@ public void acceptResourceUsage(String broker, ResourceUsage resourceUsage) { .subscriptionType(SubscriptionType.Shared) .subscribe(); } catch (PulsarClientException p) { - final String errMesg = String.format("Got exception while building consumer: ex={}", p.getMessage()); - Assert.assertTrue(false, errMesg); + final String errMsg = String.format("Got exception while building consumer: ex=%s", p.getMessage()); + Assert.fail(errMsg); } final TopicName myTopic = TopicName.get(topicString); @@ -146,16 +146,15 @@ public void acceptResourceUsage(String broker, ResourceUsage resourceUsage) { int recvdNumBytes = 0; int recvdNumMsgs = 0; for (int ix = 
0; ix < NumMessagesToSend; ix++) { - MessageId prodMesgId = null; byte[] mesg; try { - mesg = String.format("Hi, ix={}", ix).getBytes(); + mesg = String.format("Hi, ix=%s", ix).getBytes(); producer.send(mesg); sentNumBytes += mesg.length; sentNumMsgs++; } catch (PulsarClientException p) { - final String errMesg = String.format("Got exception while sending {}-th time: ex={}", ix, p.getMessage()); - Assert.assertTrue(false, errMesg); + final String errMsg = String.format("Got exception while sending %s-th time: ex=%s", ix, p.getMessage()); + Assert.fail(errMsg); } } producer.close(); @@ -169,9 +168,9 @@ public void acceptResourceUsage(String broker, ResourceUsage resourceUsage) { message = consumer.receive(); recvdNumBytes += message.getValue().length; } catch (PulsarClientException p) { - final String errMesg = String.format("Got exception in while receiving {}-th mesg at consumer: ex={}", + final String errMesg = String.format("Got exception in while receiving %s-th mesg at consumer: ex=%s", recvdNumMsgs, p.getMessage()); - Assert.assertTrue(false, errMesg); + Assert.fail(errMesg); } // log.info("consumer received message : {} {}", message.getMessageId(), new String(message.getData())); recvdNumMsgs++; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImplTest.java index 1a98838bb4525..af8615936ccfe 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImplTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImplTest.java @@ -112,5 +112,15 @@ public void testRQCalcGlobUsedZeroTest() throws PulsarAdminException { Assert.assertTrue(newQuota == config); } + @Test + public void testNeedToReportLocalUsage() { + // If the percentage change (increase or decrease) in usage is more than 5% for + // either bytes or 
messages, send a report. + Assert.assertFalse(rqCalc.needToReportLocalUsage(1040, 1000, 104, 100, System.currentTimeMillis())); + Assert.assertFalse(rqCalc.needToReportLocalUsage(950, 1000, 95, 100, System.currentTimeMillis())); + Assert.assertTrue(rqCalc.needToReportLocalUsage(1060, 1000, 106, 100, System.currentTimeMillis())); + Assert.assertTrue(rqCalc.needToReportLocalUsage(940, 1000, 94, 100, System.currentTimeMillis())); + } + private ResourceQuotaCalculatorImpl rqCalc; } \ No newline at end of file diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceUsageTransportManagerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceUsageTransportManagerTest.java index 7332307a612d2..e8182d77a4900 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceUsageTransportManagerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceUsageTransportManagerTest.java @@ -26,7 +26,6 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.ClusterDataImpl; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractSubscriptionTest.java new file mode 100644 index 0000000000000..fbc2ecf8059c4 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractSubscriptionTest.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.service; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; + +import java.util.Collections; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class AbstractSubscriptionTest { + private Consumer consumer; + private AbstractSubscription subscription; + + @BeforeMethod + public void beforeMethod() { + Dispatcher dispatcher = mock(Dispatcher.class); + consumer = mock(Consumer.class); + subscription = spy(AbstractSubscription.class); + + when(subscription.getDispatcher()).thenReturn(dispatcher); + when(dispatcher.getConsumers()).thenReturn(Collections.singletonList(consumer)); + } + + @Test + public void testGetMsgOutCounter() { + subscription.msgOutFromRemovedConsumer.add(1L); + when(consumer.getMsgOutCounter()).thenReturn(2L); + assertEquals(subscription.getMsgOutCounter(), 3L); + } + + @Test + public void testGetBytesOutCounter() { + subscription.bytesOutFromRemovedConsumers.add(1L); + when(consumer.getBytesOutCounter()).thenReturn(2L); + assertEquals(subscription.getBytesOutCounter(), 3L); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractTopicTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractTopicTest.java new file mode 100644 index 0000000000000..fb7890dc57f88 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/AbstractTopicTest.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.service; + +import static org.mockito.Mockito.CALLS_REAL_METHODS; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.withSettings; +import static org.testng.Assert.assertEquals; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class AbstractTopicTest { + private AbstractSubscription subscription; + private AbstractTopic topic; + + @BeforeMethod + public void beforeMethod() { + BrokerService brokerService = mock(BrokerService.class); + PulsarService pulsarService = mock(PulsarService.class); + ServiceConfiguration serviceConfiguration = mock(ServiceConfiguration.class); + BacklogQuotaManager backlogQuotaManager = mock(BacklogQuotaManager.class); + subscription = mock(AbstractSubscription.class); + + when(brokerService.pulsar()).thenReturn(pulsarService); + when(pulsarService.getConfiguration()).thenReturn(serviceConfiguration); + when(brokerService.getBacklogQuotaManager()).thenReturn(backlogQuotaManager); + + topic = mock(AbstractTopic.class, withSettings() + .useConstructor("topic", brokerService) + .defaultAnswer(CALLS_REAL_METHODS)); + + ConcurrentOpenHashMap subscriptions = + ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); + subscriptions.put("subscription", subscription); + when(topic.getSubscriptions()).thenAnswer(invocation -> subscriptions); + } + + @Test + public void testGetMsgOutCounter() { + topic.msgOutFromRemovedSubscriptions.add(1L); + when(subscription.getMsgOutCounter()).thenReturn(2L); + assertEquals(topic.getMsgOutCounter(), 3L); + } + + @Test + public void testGetBytesOutCounter() { + topic.bytesOutFromRemovedSubscriptions.add(1L); + 
when(subscription.getBytesOutCounter()).thenReturn(2L); + assertEquals(topic.getBytesOutCounter(), 3L); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BacklogQuotaManagerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BacklogQuotaManagerTest.java index 0dac0c2468ed5..21fcffac1ea21 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BacklogQuotaManagerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BacklogQuotaManagerTest.java @@ -483,6 +483,55 @@ public void testConsumerBacklogEvictionTimeQuota() throws Exception { client.close(); } + @Test + public void testConsumerBacklogEvictionTimeQuotaWithEmptyLedger() throws Exception { + assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), + Maps.newHashMap()); + admin.namespaces().setBacklogQuota("prop/ns-quota", + BacklogQuota.builder() + .limitTime(TIME_TO_CHECK_BACKLOG_QUOTA) + .retentionPolicy(BacklogQuota.RetentionPolicy.consumer_backlog_eviction) + .build(), BacklogQuota.BacklogQuotaType.message_age); + PulsarClient client = PulsarClient.builder().serviceUrl(adminUrl.toString()).statsInterval(0, TimeUnit.SECONDS) + .build(); + + final String topic = "persistent://prop/ns-quota/topic4"; + final String subName = "c1"; + + Consumer consumer = client.newConsumer().topic(topic).subscriptionName(subName).subscribe(); + org.apache.pulsar.client.api.Producer producer = createProducer(client, topic); + producer.send(new byte[1024]); + consumer.receive(); + + admin.topics().unload(topic); + Awaitility.await().until(consumer::isConnected); + PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(topic); + assertEquals(internalStats.ledgers.size(), 2); + assertEquals(internalStats.ledgers.get(1).entries, 0); + + TopicStats stats = admin.topics().getStats(topic); + assertEquals(stats.getSubscriptions().get(subName).getMsgBacklog(), 1); + + 
TimeUnit.SECONDS.sleep(TIME_TO_CHECK_BACKLOG_QUOTA); + + Awaitility.await() + .pollInterval(Duration.ofSeconds(1)) + .atMost(Duration.ofSeconds(TIME_TO_CHECK_BACKLOG_QUOTA)) + .untilAsserted(() -> { + rolloverStats(); + + // Cause the last ledger is empty, it is not possible to skip first ledger, + // so the number of ledgers will keep unchanged, and backlog is clear + PersistentTopicInternalStats latestInternalStats = admin.topics().getInternalStats(topic); + assertEquals(latestInternalStats.ledgers.size(), 2); + assertEquals(latestInternalStats.ledgers.get(1).entries, 0); + TopicStats latestStats = admin.topics().getStats(topic); + assertEquals(latestStats.getSubscriptions().get(subName).getMsgBacklog(), 0); + }); + + client.close(); + } + @Test public void testConsumerBacklogEvictionWithAckSizeQuota() throws Exception { assertEquals(admin.namespaces().getBacklogQuotaMap("prop/ns-quota"), @@ -1074,6 +1123,8 @@ public void testProducerExceptionAndThenUnblockSizeQuota() throws Exception { } Thread.sleep((TIME_TO_CHECK_BACKLOG_QUOTA + 1) * 1000); // publish should work now + producer.close(); + producer = createProducer(client, topic1); Exception sendException = null; gotException = false; try { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageTest.java index 02fa3e29e5eee..e35d2a0fd6ff7 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageTest.java @@ -39,7 +39,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import lombok.Cleanup; import org.apache.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers; +import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import 
org.apache.pulsar.client.api.BatcherBuilder; import org.apache.pulsar.client.api.CompressionType; @@ -49,9 +51,13 @@ import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.impl.BatchMessageIdImpl; import org.apache.pulsar.client.impl.ConsumerImpl; +import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.util.FutureUtil; +import org.awaitility.Awaitility; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.Assert; @@ -63,6 +69,8 @@ @Test(groups = "broker") public class BatchMessageTest extends BrokerTestBase { + private static final Logger log = LoggerFactory.getLogger(BatchMessageTest.class); + @BeforeClass @Override protected void setup() throws Exception { @@ -95,6 +103,15 @@ public Object[][] containerBuilderProvider() { }; } + @DataProvider(name = "testSubTypeAndEnableBatch") + public Object[][] testSubTypeAndEnableBatch() { + return new Object[][] { { SubscriptionType.Shared, Boolean.TRUE }, + { SubscriptionType.Failover, Boolean.TRUE }, + { SubscriptionType.Shared, Boolean.FALSE }, + { SubscriptionType.Failover, Boolean.FALSE } + }; + } + @Test(dataProvider = "codecAndContainerBuilder") public void testSimpleBatchProducerWithFixedBatchSize(CompressionType compressionType, BatcherBuilder builder) throws Exception { int numMsgs = 50; @@ -673,7 +690,7 @@ public void testBatchAndNonBatchCumulativeAcks(BatcherBuilder builder) throws Ex * * @throws Exception */ - @Test(dataProvider = "containerBuilder", timeOut = 3000) + @Test(dataProvider = "containerBuilder") public void testConcurrentBatchMessageAck(BatcherBuilder builder) throws Exception { int numMsgs = 10; final String topicName = "persistent://prop/ns-abc/testConcurrentAck-" + UUID.randomUUID(); @@ -919,5 +936,80 @@ public 
void testBatchMessageDispatchingAccordingToPermits() throws Exception { consumer1.close(); } + @Test(dataProvider="testSubTypeAndEnableBatch") + private void testDecreaseUnAckMessageCountWithAckReceipt(SubscriptionType subType, + boolean enableBatch) throws Exception { + final int messageCount = 50; + final String topicName = "persistent://prop/ns-abc/testDecreaseWithAckReceipt" + UUID.randomUUID(); + final String subscriptionName = "sub-batch-1"; + @Cleanup + ConsumerImpl consumer = (ConsumerImpl) pulsarClient + .newConsumer(Schema.BYTES) + .topic(topicName) + .isAckReceiptEnabled(true) + .subscriptionName(subscriptionName) + .subscriptionType(subType) + .enableBatchIndexAcknowledgment(true) + .subscribe(); + + @Cleanup + Producer producer = pulsarClient + .newProducer() + .enableBatching(enableBatch) + .topic(topicName) + .batchingMaxPublishDelay(Integer.MAX_VALUE, TimeUnit.MILLISECONDS) + .create(); + + CountDownLatch countDownLatch = new CountDownLatch(messageCount); + for (int i = 0; i < messageCount; i++) { + producer.sendAsync((i + "").getBytes()).thenAccept(msgId -> { + log.info("Published message with msgId: {}", msgId); + countDownLatch.countDown(); + }); + // To generate batch message with different batch size + // 31 total batches, 5 batches with 3 messages, 8 batches with 2 messages and 37 batches with 1 message + if (((i / 3) % (i % 3 + 1)) == 0) { + producer.flush(); + } + } + + countDownLatch.await(); + + for (int i = 0; i < messageCount; i++) { + Message message = consumer.receive(); + if (enableBatch) { + // only ack messages which batch index < 2, which means we will not to ack the + // whole batch for the batch that with more than 2 messages + if (((BatchMessageIdImpl) message.getMessageId()).getBatchIndex() < 2) { + consumer.acknowledgeAsync(message).get(); + } + } else { + if (i % 2 == 0) { + consumer.acknowledgeAsync(message).get(); + } + } + } + + String topic = TopicName.get(topicName).toString(); + PersistentSubscription 
persistentSubscription = (PersistentSubscription) pulsar.getBrokerService() + .getTopic(topic, false).get().get().getSubscription(subscriptionName); + + Awaitility.await().untilAsserted(() -> { + if (subType == SubscriptionType.Shared) { + if (enableBatch) { + if (conf.isAcknowledgmentAtBatchIndexLevelEnabled()) { + assertEquals(persistentSubscription.getConsumers().get(0).getUnackedMessages(), 5 * 1); + } else { + assertEquals(persistentSubscription.getConsumers().get(0).getUnackedMessages(), 5 * 3); + } + } else { + assertEquals(persistentSubscription.getConsumers().get(0).getUnackedMessages(), messageCount / 2); + } + } else { + assertEquals(persistentSubscription.getConsumers().get(0).getUnackedMessages(), 0); + } + }); + } + private static final Logger LOG = LoggerFactory.getLogger(BatchMessageTest.class); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java new file mode 100644 index 0000000000000..d5c4e1eb064ff --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BatchMessageWithBatchIndexLevelTest.java @@ -0,0 +1,280 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.service; + +import com.google.common.collect.Lists; +import lombok.Cleanup; +import lombok.SneakyThrows; +import org.apache.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.common.util.FutureUtil; +import org.awaitility.Awaitility; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; + +@Test(groups = "broker") +public class BatchMessageWithBatchIndexLevelTest extends BatchMessageTest { + + @BeforeClass + @Override + protected void setup() throws Exception { + conf.setAcknowledgmentAtBatchIndexLevelEnabled(true); + super.baseSetup(); + } + + @Test + @SneakyThrows + public void testBatchMessageAck() { + int numMsgs = 40; + final String topicName = "persistent://prop/ns-abc/batchMessageAck-" + UUID.randomUUID(); + final String subscriptionName = "sub-batch-1"; + + @Cleanup + Consumer consumer = pulsarClient + .newConsumer() + .topic(topicName) + .subscriptionName(subscriptionName) + .receiverQueueSize(10) + .subscriptionType(SubscriptionType.Shared) + .enableBatchIndexAcknowledgment(true) + .negativeAckRedeliveryDelay(100, TimeUnit.MILLISECONDS) + .subscribe(); + + @Cleanup + Producer producer = pulsarClient + .newProducer() + .topic(topicName) + 
.batchingMaxMessages(20) + .batchingMaxPublishDelay(1, TimeUnit.HOURS) + .enableBatching(true) + .create(); + + List> sendFutureList = Lists.newArrayList(); + for (int i = 0; i < numMsgs; i++) { + byte[] message = ("batch-message-" + i).getBytes(); + sendFutureList.add(producer.newMessage().value(message).sendAsync()); + } + FutureUtil.waitForAll(sendFutureList).get(); + PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName).get(); + PersistentDispatcherMultipleConsumers dispatcher = (PersistentDispatcherMultipleConsumers) topic + .getSubscription(subscriptionName).getDispatcher(); + Message receive1 = consumer.receive(); + Message receive2 = consumer.receive(); + consumer.acknowledge(receive1); + consumer.acknowledge(receive2); + Awaitility.await().untilAsserted(() -> { + assertEquals(dispatcher.getConsumers().get(0).getUnackedMessages(), 18); + }); + Message receive3 = consumer.receive(); + Message receive4 = consumer.receive(); + consumer.acknowledge(receive3); + consumer.acknowledge(receive4); + Awaitility.await().untilAsserted(() -> { + assertEquals(dispatcher.getConsumers().get(0).getUnackedMessages(), 16); + }); + Message receive5 = consumer.receive(); + consumer.negativeAcknowledge(receive5); + Awaitility.await().pollInterval(1, TimeUnit.MILLISECONDS).untilAsserted(() -> { + assertEquals(dispatcher.getConsumers().get(0).getUnackedMessages(), 0); + }); + consumer.receive(); + Awaitility.await().untilAsserted(() -> { + assertEquals(dispatcher.getConsumers().get(0).getUnackedMessages(), 16); + }); + } + + @Test + public void testBatchMessageMultiNegtiveAck() throws Exception{ + final String topicName = "persistent://prop/ns-abc/batchMessageMultiNegtiveAck-" + UUID.randomUUID(); + final String subscriptionName = "sub-negtive-1"; + + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName(subscriptionName) + .subscriptionType(SubscriptionType.Shared) + 
.receiverQueueSize(10) + .enableBatchIndexAcknowledgment(true) + .negativeAckRedeliveryDelay(100, TimeUnit.MILLISECONDS) + .subscribe(); + + @Cleanup + Producer producer = pulsarClient + .newProducer(Schema.STRING) + .topic(topicName) + .batchingMaxMessages(20) + .batchingMaxPublishDelay(1, TimeUnit.HOURS) + .enableBatching(true) + .create(); + + final int N = 20; + for (int i = 0; i < N; i++) { + String value = "test-" + i; + producer.sendAsync(value); + } + producer.flush(); + for (int i = 0; i < N; i++) { + Message msg = consumer.receive(); + if (i % 2 == 0) { + consumer.acknowledgeAsync(msg); + } else { + consumer.negativeAcknowledge(msg); + } + } + Awaitility.await().untilAsserted(() -> { + long unackedMessages = admin.topics().getStats(topicName).getSubscriptions().get(subscriptionName) + .getUnackedMessages(); + assertEquals(unackedMessages, 10); + }); + + // Test negtive ack with sleep + final String topicName2 = "persistent://prop/ns-abc/batchMessageMultiNegtiveAck2-" + UUID.randomUUID(); + final String subscriptionName2 = "sub-negtive-2"; + @Cleanup + Consumer consumer2 = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName2) + .subscriptionName(subscriptionName2) + .subscriptionType(SubscriptionType.Shared) + .receiverQueueSize(10) + .enableBatchIndexAcknowledgment(true) + .negativeAckRedeliveryDelay(100, TimeUnit.MILLISECONDS) + .subscribe(); + @Cleanup + Producer producer2 = pulsarClient + .newProducer(Schema.STRING) + .topic(topicName2) + .batchingMaxMessages(20) + .batchingMaxPublishDelay(1, TimeUnit.HOURS) + .enableBatching(true) + .create(); + + for (int i = 0; i < N; i++) { + String value = "test-" + i; + producer2.sendAsync(value); + } + producer2.flush(); + for (int i = 0; i < N; i++) { + Message msg = consumer2.receive(); + if (i % 2 == 0) { + consumer.acknowledgeAsync(msg); + } else { + consumer.negativeAcknowledge(msg); + Thread.sleep(100); + } + } + Awaitility.await().untilAsserted(() -> { + long unackedMessages = 
admin.topics().getStats(topicName).getSubscriptions().get(subscriptionName) + .getUnackedMessages(); + assertEquals(unackedMessages, 10); + }); + } + + @Test + public void testAckMessageWithNotOwnerConsumerUnAckMessageCount() throws Exception { + final String subName = "test"; + final String topicName = "persistent://prop/ns-abc/testAckMessageWithNotOwnerConsumerUnAckMessageCount-" + + UUID.randomUUID(); + + @Cleanup + Producer producer = pulsarClient + .newProducer() + .topic(topicName) + .batchingMaxPublishDelay(1, TimeUnit.SECONDS) + .enableBatching(true) + .create(); + + @Cleanup + Consumer consumer1 = pulsarClient + .newConsumer() + .topic(topicName) + .consumerName("consumer-1") + .negativeAckRedeliveryDelay(1, TimeUnit.SECONDS) + .isAckReceiptEnabled(true) + .subscriptionName(subName) + .subscriptionType(SubscriptionType.Shared) + .enableBatchIndexAcknowledgment(true) + .subscribe(); + + @Cleanup + Consumer consumer2 = pulsarClient + .newConsumer() + .topic(topicName) + .consumerName("consumer-2") + .negativeAckRedeliveryDelay(1, TimeUnit.SECONDS) + .isAckReceiptEnabled(true) + .subscriptionName(subName) + .subscriptionType(SubscriptionType.Shared) + .enableBatchIndexAcknowledgment(true) + .subscribe(); + + for (int i = 0; i < 5; i++) { + producer.newMessage().value(("Hello Pulsar - " + i).getBytes()).sendAsync(); + } + + // consume-1 receive 5 batch messages + List list = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + list.add(consumer1.receive().getMessageId()); + } + + // consumer-1 redeliver the batch messages + consumer1.negativeAcknowledge(list.get(0)); + + // consumer-2 will receive the messages that the consumer-1 redelivered + for (int i = 0; i < 5; i++) { + consumer2.receive().getMessageId(); + } + + // consumer1 ack two messages in the batch message + consumer1.acknowledge(list.get(1)); + consumer1.acknowledge(list.get(2)); + + // consumer-2 redeliver the rest of the messages + consumer2.negativeAcknowledge(list.get(1)); + + // consume-1 
close will redeliver the rest messages to consumer-2 + consumer1.close(); + + // consumer-2 can receive the rest of 3 messages + for (int i = 0; i < 3; i++) { + consumer2.acknowledge(consumer2.receive().getMessageId()); + } + + // consumer-2 can't receive any messages, all the messages in batch has been acked + Message message = consumer2.receive(1, TimeUnit.SECONDS); + assertNull(message); + + // the number of consumer-2's unacked messages is 0 + Awaitility.await().until(() -> getPulsar().getBrokerService().getTopic(topicName, false) + .get().get().getSubscription(subName).getConsumers().get(0).getUnackedMessages() == 0); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerEntryMetadataE2ETest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerEntryMetadataE2ETest.java index d2ea2f1fc261f..49b4742b71dad 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerEntryMetadataE2ETest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerEntryMetadataE2ETest.java @@ -18,18 +18,26 @@ */ package org.apache.pulsar.broker.service; +import static org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo; + +import java.time.Duration; import java.util.List; import java.util.concurrent.TimeUnit; import lombok.Cleanup; +import org.apache.bookkeeper.mledger.ManagedCursor; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.client.impl.MessageImpl; import org.apache.pulsar.common.api.proto.BrokerEntryMetadata; import 
org.assertj.core.util.Sets; +import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; @@ -354,4 +362,41 @@ public void testConsumerGetBrokerEntryMetadataForBatchMessage() throws Exception producer.close(); consumer.close(); } + + @Test + public void testManagedLedgerTotalSize() throws Exception { + final String topic = newTopicName(); + final int messages = 10; + + admin.topics().createNonPartitionedTopic(topic); + admin.lookups().lookupTopic(topic); + final ManagedLedgerImpl managedLedger = pulsar.getBrokerService().getTopicIfExists(topic).get() + .map(topicObject -> (ManagedLedgerImpl) ((PersistentTopic) topicObject).getManagedLedger()) + .orElse(null); + Assert.assertNotNull(managedLedger); + final ManagedCursor cursor = managedLedger.openCursor("cursor"); // prevent ledgers being removed + + @Cleanup + final Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .create(); + for (int i = 0; i < messages; i++) { + producer.send("msg-" + i); + } + + Assert.assertTrue(managedLedger.getTotalSize() > 0); + + managedLedger.getConfig().setMinimumRolloverTime(0, TimeUnit.MILLISECONDS); + managedLedger.getConfig().setMaxEntriesPerLedger(1); + managedLedger.rollCurrentLedgerIfFull(); + + Awaitility.await().atMost(Duration.ofSeconds(3)) + .until(() -> managedLedger.getLedgersInfo().size() > 1); + + final List ledgerInfoList = managedLedger.getLedgersInfoAsList(); + Assert.assertEquals(ledgerInfoList.size(), 2); + Assert.assertEquals(ledgerInfoList.get(0).getSize(), managedLedger.getTotalSize()); + + cursor.close(); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerInternalClientConfigurationOverrideTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerInternalClientConfigurationOverrideTest.java new file mode 100644 index 0000000000000..775636c9489fb --- /dev/null +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerInternalClientConfigurationOverrideTest.java @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.service; + +import org.apache.pulsar.broker.PulsarServerException; +import org.apache.pulsar.client.admin.internal.PulsarAdminImpl; +import org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.ClusterDataImpl; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import java.util.Optional; +import java.util.Properties; + +public class BrokerInternalClientConfigurationOverrideTest extends BrokerTestBase { + + @BeforeClass + @Override + protected void setup() throws Exception { + super.baseSetup(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testPulsarServiceAdminClientConfiguration() throws PulsarServerException { + Properties 
config = pulsar.getConfiguration().getProperties(); + config.setProperty("brokerClient_operationTimeoutMs", "60000"); + config.setProperty("brokerClient_statsIntervalSeconds", "10"); + ClientConfigurationData clientConf = ((PulsarAdminImpl) pulsar.getAdminClient()).getClientConfigData(); + Assert.assertEquals(clientConf.getOperationTimeoutMs(), 60000); + Assert.assertEquals(clientConf.getStatsIntervalSeconds(), 10); + } + + @Test + public void testPulsarServicePulsarClientConfiguration() throws PulsarServerException { + Properties config = pulsar.getConfiguration().getProperties(); + config.setProperty("brokerClient_operationTimeoutMs", "60000"); + config.setProperty("brokerClient_statsIntervalSeconds", "10"); + pulsar.getConfiguration().setBrokerClientAuthenticationParameters("sensitive"); + ClientConfigurationData clientConf = ((PulsarClientImpl) pulsar.getClient()).getConfiguration(); + Assert.assertEquals(clientConf.getOperationTimeoutMs(), 60000); + // Config should override internal default, which is 0. + Assert.assertEquals(clientConf.getStatsIntervalSeconds(), 10); + Assert.assertEquals(clientConf.getAuthParams(), "sensitive"); + } + + @Test + public void testBrokerServicePulsarClientConfiguration() { + // This data only needs to have the service url for this test. 
+ ClusterData data = ClusterData.builder().serviceUrl("http://localhost:8080").build(); + + // Set the configs and set some configs that won't apply + Properties config = pulsar.getConfiguration().getProperties(); + config.setProperty("brokerClient_operationTimeoutMs", "60000"); + config.setProperty("brokerClient_statsIntervalSeconds", "10"); + config.setProperty("memoryLimitBytes", "10"); + config.setProperty("brokerClient_memoryLimitBytes", "100000"); + + PulsarClientImpl client = (PulsarClientImpl) pulsar.getBrokerService() + .getReplicationClient("an_arbitrary_name", Optional.of(data)); + ClientConfigurationData clientConf = client.getConfiguration(); + Assert.assertEquals(clientConf.getOperationTimeoutMs(), 60000); + // Config should override internal default, which is 0. + Assert.assertEquals(clientConf.getStatsIntervalSeconds(), 10); + // This config defaults to 0 (for good reason), but it could be overridden by configuration. + Assert.assertEquals(clientConf.getMemoryLimitBytes(), 100000); + } + + @Test + public void testNamespaceServicePulsarClientConfiguration() { + // This data only needs to have the service url for this test. + ClusterDataImpl data = (ClusterDataImpl) ClusterData.builder().serviceUrl("http://localhost:8080").build(); + + // Set the configs and set some configs that won't apply + Properties config = pulsar.getConfiguration().getProperties(); + config.setProperty("brokerClient_operationTimeoutMs", "60000"); + config.setProperty("brokerClient_statsIntervalSeconds", "10"); + config.setProperty("memoryLimitBytes", "10"); + config.setProperty("brokerClient_memoryLimitBytes", "100000"); + + PulsarClientImpl client = pulsar.getNamespaceService().getNamespaceClient(data); + ClientConfigurationData clientConf = client.getConfiguration(); + Assert.assertEquals(clientConf.getOperationTimeoutMs(), 60000); + // Config should override internal default, which is 0. 
+ Assert.assertEquals(clientConf.getStatsIntervalSeconds(), 10); + // This config defaults to 0 (for good reason), but it could be overridden by configuration. + Assert.assertEquals(clientConf.getMemoryLimitBytes(), 100000); + } + +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoSubscriptionCreationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoSubscriptionCreationTest.java index c635968907ab5..dc4b3d9d51d88 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoSubscriptionCreationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoSubscriptionCreationTest.java @@ -26,6 +26,7 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.AutoSubscriptionCreationOverride; +import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeClass; @@ -98,10 +99,13 @@ public void testAutoSubscriptionCreationNamespaceAllowOverridesBroker() throws E admin.topics().createNonPartitionedTopic(topicName.toString()); pulsar.getConfiguration().setAllowAutoSubscriptionCreation(false); + AutoSubscriptionCreationOverride autoSubscriptionCreationOverride = AutoSubscriptionCreationOverride.builder() + .allowAutoSubscriptionCreation(true) + .build(); pulsar.getAdminClient().namespaces().setAutoSubscriptionCreation(topicName.getNamespace(), - AutoSubscriptionCreationOverride.builder() - .allowAutoSubscriptionCreation(true) - .build()); + autoSubscriptionCreationOverride); + Assert.assertEquals(pulsar.getAdminClient().namespaces().getAutoSubscriptionCreation(topicName.getNamespace()), + autoSubscriptionCreationOverride); // Subscribe operation should be successful 
pulsarClient.newConsumer().topic(topicName.toString()).subscriptionName(subscriptionName).subscribe(); @@ -117,10 +121,13 @@ public void testAutoSubscriptionCreationNamespaceDisallowOverridesBroker() throw admin.topics().createNonPartitionedTopic(topicName.toString()); pulsar.getConfiguration().setAllowAutoSubscriptionCreation(true); + AutoSubscriptionCreationOverride autoSubscriptionCreationOverride = AutoSubscriptionCreationOverride.builder() + .allowAutoSubscriptionCreation(false) + .build(); pulsar.getAdminClient().namespaces().setAutoSubscriptionCreation(topicName.getNamespace(), - AutoSubscriptionCreationOverride.builder() - .allowAutoSubscriptionCreation(false) - .build()); + autoSubscriptionCreationOverride); + Assert.assertEquals(pulsar.getAdminClient().namespaces().getAutoSubscriptionCreation(topicName.getNamespace()), + autoSubscriptionCreationOverride); try { pulsarClient.newConsumer().topic(topicName.toString()).subscriptionName(subscriptionName).subscribe(); @@ -131,4 +138,18 @@ public void testAutoSubscriptionCreationNamespaceDisallowOverridesBroker() throw assertFalse(admin.topics().getSubscriptions(topicName.toString()).contains(subscriptionName)); } + @Test + public void testNonPersistentTopicSubscriptionCreationWithAutoCreationDisable() throws Exception { + pulsar.getConfiguration().setAllowAutoSubscriptionCreation(false); + + final String topicName = "non-persistent://prop/ns-abc/test-subtopic-" + testId.getAndIncrement(); + final String subscriptionName = "test-subtopic-sub"; + + admin.topics().createNonPartitionedTopic(topicName); + + // Subscribe operation should be successful + pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName).subscribe(); + assertTrue(admin.topics().getSubscriptions(topicName).contains(subscriptionName)); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoTopicCreationTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoTopicCreationTest.java index 7b749339e1181..1b3ca1616fa61 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoTopicCreationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceAutoTopicCreationTest.java @@ -29,6 +29,7 @@ import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.AutoTopicCreationOverride; import org.apache.pulsar.common.policies.data.TopicType; +import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeClass; @@ -168,11 +169,13 @@ public void testAutoCreationNamespaceAllowOverridesBroker() throws Exception { final String subscriptionName = "test-topic-sub-4"; final TopicName topicName = TopicName.get(topicString); pulsar.getConfiguration().setAllowAutoTopicCreation(false); - pulsar.getAdminClient().namespaces().setAutoTopicCreation(topicName.getNamespace(), - AutoTopicCreationOverride.builder() - .allowAutoTopicCreation(true) - .topicType(TopicType.NON_PARTITIONED.toString()) - .build()); + AutoTopicCreationOverride autoTopicCreationOverride = AutoTopicCreationOverride.builder() + .allowAutoTopicCreation(true) + .topicType(TopicType.NON_PARTITIONED.toString()) + .build(); + pulsar.getAdminClient().namespaces().setAutoTopicCreation(topicName.getNamespace(), autoTopicCreationOverride); + Assert.assertEquals(pulsar.getAdminClient().namespaces().getAutoTopicCreation(topicName.getNamespace()), + autoTopicCreationOverride); pulsarClient.newConsumer().topic(topicString).subscriptionName(subscriptionName).subscribe(); assertTrue(admin.namespaces().getTopics("prop/ns-abc").contains(topicString)); @@ -185,10 +188,12 @@ public void testAutoCreationNamespaceDisallowOverridesBroker() throws Exception final String subscriptionName = "test-topic-sub-5"; final TopicName topicName = 
TopicName.get(topicString); pulsar.getConfiguration().setAllowAutoTopicCreation(true); - pulsar.getAdminClient().namespaces().setAutoTopicCreation(topicName.getNamespace(), - AutoTopicCreationOverride.builder() - .allowAutoTopicCreation(false) - .build()); + AutoTopicCreationOverride autoTopicCreationOverride = AutoTopicCreationOverride.builder() + .allowAutoTopicCreation(false) + .build(); + pulsar.getAdminClient().namespaces().setAutoTopicCreation(topicName.getNamespace(), autoTopicCreationOverride); + Assert.assertEquals(pulsar.getAdminClient().namespaces().getAutoTopicCreation(topicName.getNamespace()), + autoTopicCreationOverride); try { pulsarClient.newConsumer().topic(topicString).subscriptionName(subscriptionName).subscribe(); @@ -205,12 +210,14 @@ public void testAutoCreationNamespaceOverrideAllowsPartitionedTopics() throws Ex final TopicName topicName = TopicName.get(topicString); pulsar.getConfiguration().setAllowAutoTopicCreation(false); - pulsar.getAdminClient().namespaces().setAutoTopicCreation(topicName.getNamespace(), - AutoTopicCreationOverride.builder() - .allowAutoTopicCreation(true) - .topicType(TopicType.PARTITIONED.toString()) - .defaultNumPartitions(4) - .build()); + AutoTopicCreationOverride autoTopicCreationOverride = AutoTopicCreationOverride.builder() + .allowAutoTopicCreation(true) + .topicType(TopicType.PARTITIONED.toString()) + .defaultNumPartitions(4) + .build(); + pulsar.getAdminClient().namespaces().setAutoTopicCreation(topicName.getNamespace(), autoTopicCreationOverride); + Assert.assertEquals(pulsar.getAdminClient().namespaces().getAutoTopicCreation(topicName.getNamespace()), + autoTopicCreationOverride); final String subscriptionName = "test-topic-sub-6"; pulsarClient.newConsumer().topic(topicString).subscriptionName(subscriptionName).subscribe(); @@ -350,6 +357,21 @@ public void testNotAllowSubscriptionTopicCreation() throws Exception{ } + @Test + public void testAutoCreationNamespaceOverridesSubscriptionTopicCreation() throws 
Exception { + pulsar.getConfiguration().setAllowAutoTopicCreation(false); + String topicString = "persistent://prop/ns-abc/non-partitioned-topic" + System.currentTimeMillis(); + String subscriptionName = "non-partitioned-topic-sub"; + final TopicName topicName = TopicName.get(topicString); + pulsar.getAdminClient().namespaces().setAutoTopicCreation(topicName.getNamespace(), + AutoTopicCreationOverride.builder() + .allowAutoTopicCreation(true) + .topicType(TopicType.NON_PARTITIONED.toString()) + .build()); + + admin.topics().createSubscription(topicString, subscriptionName, MessageId.earliest); + } + @Test public void testMaxNumPartitionsPerPartitionedTopicTopicCreation() { pulsar.getConfiguration().setAllowAutoTopicCreation(true); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java index af8ddb583a58f..c7ffef1a97b7e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/BrokerServiceTest.java @@ -18,10 +18,13 @@ */ package org.apache.pulsar.broker.service; +import static org.apache.pulsar.common.naming.TopicName.TRANSACTION_COORDINATOR_ASSIGN; +import static org.apache.pulsar.common.naming.TopicName.TRANSACTION_COORDINATOR_LOG; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; @@ -66,6 +69,7 @@ import org.apache.http.client.HttpClient; import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.pulsar.broker.namespace.NamespaceService; import 
org.apache.pulsar.broker.service.BrokerServiceException.PersistenceException; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.stats.prometheus.PrometheusRawMetricsProvider; @@ -723,6 +727,7 @@ public void testTlsEnabledWithoutNonTlsServicePorts() throws Exception { } finally { pulsarClient.close(); } + resetState(); } @SuppressWarnings("deprecation") @@ -1275,4 +1280,33 @@ public void shouldNotPreventCreatingTopicWhenNonexistingTopicIsCached() throws E getStatsThread.join(); } } + + @Test + public void testIsSystemTopic() { + BrokerService brokerService = pulsar.getBrokerService(); + assertFalse(brokerService.isSystemTopic(TopicName.get("test"))); + assertFalse(brokerService.isSystemTopic(TopicName.get("public/default/test"))); + assertFalse(brokerService.isSystemTopic(TopicName.get("healthcheck"))); + assertFalse(brokerService.isSystemTopic(TopicName.get("public/default/healthcheck"))); + assertFalse(brokerService.isSystemTopic(TopicName.get("persistent://public/default/test"))); + assertFalse(brokerService.isSystemTopic(TopicName.get("non-persistent://public/default/test"))); + + assertTrue(brokerService.isSystemTopic(TopicName.get("__change_events"))); + assertTrue(brokerService.isSystemTopic(TopicName.get("__change_events-partition-0"))); + assertTrue(brokerService.isSystemTopic(TopicName.get("__change_events-partition-1"))); + assertTrue(brokerService.isSystemTopic(TopicName.get("__transaction_buffer_snapshot"))); + assertTrue(brokerService.isSystemTopic(TopicName.get("__transaction_buffer_snapshot-partition-0"))); + assertTrue(brokerService.isSystemTopic(TopicName.get("__transaction_buffer_snapshot-partition-1"))); + assertTrue(brokerService.isSystemTopic(TopicName + .get("topicxxx-partition-0-multiTopicsReader-f433329d68__transaction_pending_ack"))); + assertTrue(brokerService.isSystemTopic( + TopicName.get("topicxxx-multiTopicsReader-f433329d68__transaction_pending_ack"))); + + 
assertTrue(brokerService.isSystemTopic(TRANSACTION_COORDINATOR_ASSIGN)); + assertTrue(brokerService.isSystemTopic(TRANSACTION_COORDINATOR_LOG)); + NamespaceName heartbeatNamespaceV1 = NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfig()); + NamespaceName heartbeatNamespaceV2 = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), pulsar.getConfig()); + assertTrue(brokerService.isSystemTopic("persistent://" + heartbeatNamespaceV1.toString() + "/healthcheck")); + assertTrue(brokerService.isSystemTopic(heartbeatNamespaceV2.toString() + "/healthcheck")); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ConsumerTest.java new file mode 100644 index 0000000000000..65dc4336a0ece --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ConsumerTest.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.service; + +import static java.util.Collections.emptyMap; +import static org.apache.pulsar.client.api.MessageId.latest; +import static org.apache.pulsar.common.api.proto.CommandSubscribe.SubType.Exclusive; +import static org.apache.pulsar.common.api.proto.KeySharedMode.AUTO_SPLIT; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; +import java.net.SocketAddress; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.common.api.proto.CommandSubscribe; +import org.apache.pulsar.common.api.proto.KeySharedMeta; +import org.apache.pulsar.common.policies.data.stats.ConsumerStatsImpl; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class ConsumerTest { + private Consumer consumer; + private final ConsumerStatsImpl stats = new ConsumerStatsImpl(); + + @BeforeMethod + public void beforeMethod() { + Subscription subscription = mock(Subscription.class); + ServerCnx cnx = mock(ServerCnx.class); + SocketAddress address = mock(SocketAddress.class); + Topic topic = mock(Topic.class); + BrokerService brokerService = mock(BrokerService.class); + PulsarService pulsarService = mock(PulsarService.class); + ServiceConfiguration serviceConfiguration = mock(ServiceConfiguration.class); + + when(cnx.clientAddress()).thenReturn(address); + when(subscription.getTopic()).thenReturn(topic); + when(topic.getBrokerService()).thenReturn(brokerService); + when(brokerService.getPulsar()).thenReturn(pulsarService); + when(pulsarService.getConfiguration()).thenReturn(serviceConfiguration); + + consumer = new Consumer(subscription, Exclusive, "topic", 1, 0, "Cons1", 1, cnx, "myrole-1", emptyMap(), false, + CommandSubscribe.InitialPosition.Earliest, new KeySharedMeta().setKeySharedMode(AUTO_SPLIT), latest); + } + + @Test + public void 
testGetMsgOutCounter() { + stats.msgOutCounter = 1L; + consumer.updateStats(stats); + assertEquals(consumer.getMsgOutCounter(), 1L); + } + + @Test + public void testGetBytesOutCounter() { + stats.bytesOutCounter = 1L; + consumer.updateStats(stats); + assertEquals(consumer.getBytesOutCounter(), 1L); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/CurrentLedgerRolloverIfFullTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/CurrentLedgerRolloverIfFullTest.java index 77ec229862e90..b05abf3be5218 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/CurrentLedgerRolloverIfFullTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/CurrentLedgerRolloverIfFullTest.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.service; +import java.lang.reflect.Field; import java.time.Duration; import java.util.concurrent.TimeUnit; import lombok.Cleanup; @@ -98,6 +99,9 @@ public void testCurrentLedgerRolloverIfFull() throws Exception { }); // trigger a ledger rollover + Field stateUpdater = ManagedLedgerImpl.class.getDeclaredField("state"); + stateUpdater.setAccessible(true); + stateUpdater.set(managedLedger, ManagedLedgerImpl.State.LedgerOpened); managedLedger.rollCurrentLedgerIfFull(); // the last ledger will be closed and removed and we have one ledger for empty diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ExclusiveProducerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ExclusiveProducerTest.java index 04da55dab2141..4d2b16a6a0e75 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ExclusiveProducerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ExclusiveProducerTest.java @@ -25,6 +25,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; +import io.netty.util.HashedWheelTimer; import lombok.Cleanup; import 
org.apache.pulsar.client.api.Producer; @@ -33,8 +34,11 @@ import org.apache.pulsar.client.api.PulsarClientException.ProducerBusyException; import org.apache.pulsar.client.api.PulsarClientException.ProducerFencedException; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.naming.TopicName; +import org.awaitility.Awaitility; import org.powermock.reflect.Whitebox; +import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.DataProvider; @@ -117,6 +121,30 @@ private void simpleTest(String topic) throws Exception { p2.close(); } + @Test(dataProvider = "topics") + public void testProducerTasksCleanupWhenUsingExclusiveProducers(String type, boolean partitioned) throws Exception { + String topic = newTopic(type, partitioned); + Producer p1 = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .accessMode(ProducerAccessMode.Exclusive) + .create(); + + try { + pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .accessMode(ProducerAccessMode.Exclusive) + .create(); + fail("Should have failed"); + } catch (ProducerFencedException e) { + // Expected + } + + p1.close(); + + HashedWheelTimer timer = (HashedWheelTimer) ((PulsarClientImpl) pulsarClient).timer(); + Awaitility.await().untilAsserted(() -> Assert.assertEquals(timer.pendingTimeouts(), 0)); + } + @Test(dataProvider = "topics") public void existingSharedProducer(String type, boolean partitioned) throws Exception { String topic = newTopic(type, partitioned); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/InactiveTopicDeleteTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/InactiveTopicDeleteTest.java index 94ac6e8b4afc8..1a2dd423b904e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/InactiveTopicDeleteTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/InactiveTopicDeleteTest.java @@ -34,10 +34,14 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.TimeUnit; +import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.naming.TopicVersion; import org.apache.pulsar.common.policies.data.InactiveTopicDeleteMode; import org.apache.pulsar.common.policies.data.InactiveTopicPolicies; import org.apache.pulsar.zookeeper.ZooKeeperManagedLedgerCache; @@ -576,4 +580,34 @@ public void testInactiveTopicApplied() throws Exception { Awaitility.await().untilAsserted(() -> assertEquals(admin.topics().getInactiveTopicPolicies(topic, true), brokerLevelPolicy)); } + + @Test(timeOut = 30000) + public void testHealthTopicInactiveNotClean() throws Exception { + conf.setSystemTopicEnabled(true); + conf.setBrokerDeleteInactiveTopicsMode(InactiveTopicDeleteMode.delete_when_no_subscriptions); + conf.setBrokerDeleteInactiveTopicsFrequencySeconds(1); + super.baseSetup(); + // init topic + NamespaceName heartbeatNamespaceV1 = NamespaceService.getHeartbeatNamespace(pulsar.getAdvertisedAddress(), pulsar.getConfig()); + final String healthCheckTopicV1 = "persistent://" + heartbeatNamespaceV1 + "/healthcheck"; + + NamespaceName heartbeatNamespaceV2 = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), pulsar.getConfig()); + final String healthCheckTopicV2 = "persistent://" + heartbeatNamespaceV2 + "/healthcheck"; + + admin.brokers().healthcheck(TopicVersion.V1); + admin.brokers().healthcheck(TopicVersion.V2); + + List V1Partitions = pulsar + .getPulsarResources() + .getTopicResources() + 
.getExistingPartitions(TopicName.get(healthCheckTopicV1)) + .get(10, TimeUnit.SECONDS); + List V2Partitions = pulsar + .getPulsarResources() + .getTopicResources() + .getExistingPartitions(TopicName.get(healthCheckTopicV2)) + .get(10, TimeUnit.SECONDS); + Assert.assertTrue(V1Partitions.contains(healthCheckTopicV1)); + Assert.assertTrue(V2Partitions.contains(healthCheckTopicV2)); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/MessageCumulativeAckTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/MessageCumulativeAckTest.java new file mode 100644 index 0000000000000..d45054fab7939 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/MessageCumulativeAckTest.java @@ -0,0 +1,194 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.service; + +import static java.util.Collections.emptyMap; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; +import static org.apache.pulsar.common.api.proto.CommandAck.AckType.Cumulative; +import static org.apache.pulsar.common.api.proto.CommandSubscribe.SubType.Exclusive; +import static org.apache.pulsar.common.api.proto.CommandSubscribe.SubType.Failover; +import static org.apache.pulsar.common.api.proto.CommandSubscribe.SubType.Key_Shared; +import static org.apache.pulsar.common.api.proto.CommandSubscribe.SubType.Shared; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import java.net.InetSocketAddress; +import org.apache.bookkeeper.common.util.OrderedExecutor; +import org.apache.bookkeeper.mledger.ManagedLedger; +import org.apache.bookkeeper.mledger.ManagedLedgerFactory; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.resources.PulsarResources; +import org.apache.pulsar.broker.service.persistent.PersistentSubscription; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.broker.transaction.TransactionTestBase; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.common.api.proto.CommandAck; +import org.apache.pulsar.common.api.proto.CommandSubscribe; +import org.apache.pulsar.common.api.proto.ProtocolVersion; 
+import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.metadata.api.MetadataStore; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.MetadataStoreFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class MessageCumulativeAckTest { + private final int consumerId = 1; + private BrokerService brokerService; + private ServerCnx serverCnx; + private MetadataStore store; + protected PulsarService pulsar; + private OrderedExecutor executor; + private EventLoopGroup eventLoopGroup; + private PersistentSubscription sub; + + @BeforeMethod + public void setup() throws Exception { + executor = OrderedExecutor.newBuilder().numThreads(1).name("persistent-dispatcher-cumulative-ack-test").build(); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); + svcConfig.setBrokerShutdownTimeoutMs(0L); + svcConfig.setLoadBalancerOverrideBrokerNicSpeedGbps(1.0d); + svcConfig.setClusterName("pulsar-cluster"); + pulsar = spyWithClassAndConstructorArgs(PulsarService.class, svcConfig); + doReturn(svcConfig).when(pulsar).getConfiguration(); + + ManagedLedgerFactory mlFactoryMock = mock(ManagedLedgerFactory.class); + doReturn(mlFactoryMock).when(pulsar).getManagedLedgerFactory(); + doReturn(TransactionTestBase.createMockBookKeeper(executor)) + .when(pulsar).getBookKeeperClient(); + + store = MetadataStoreFactory.create("memory://local", MetadataStoreConfig.builder().build()); + doReturn(store).when(pulsar).getLocalMetadataStore(); + doReturn(store).when(pulsar).getConfigurationMetadataStore(); + + PulsarResources pulsarResources = new PulsarResources(store, store); + doReturn(pulsarResources).when(pulsar).getPulsarResources(); + + serverCnx = spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); + doReturn(true).when(serverCnx).isActive(); + 
doReturn(true).when(serverCnx).isWritable(); + doReturn(new InetSocketAddress("localhost", 1234)).when(serverCnx).clientAddress(); + when(serverCnx.getRemoteEndpointProtocolVersion()).thenReturn(ProtocolVersion.v12.getValue()); + when(serverCnx.ctx()).thenReturn(mock(ChannelHandlerContext.class)); + doReturn(new PulsarCommandSenderImpl(null, serverCnx)) + .when(serverCnx).getCommandSender(); + + eventLoopGroup = new NioEventLoopGroup(); + brokerService = spyWithClassAndConstructorArgs(BrokerService.class, pulsar, eventLoopGroup); + doReturn(brokerService).when(pulsar).getBrokerService(); + + String topicName = TopicName.get("MessageCumulativeAckTest").toString(); + PersistentTopic persistentTopic = new PersistentTopic(topicName, mock(ManagedLedger.class), brokerService); + sub = spy(new PersistentSubscription(persistentTopic, "sub-1", + mock(ManagedCursorImpl.class), false)); + doNothing().when(sub).acknowledgeMessage(any(), any(), any()); + } + + @AfterMethod(alwaysRun = true) + public void shutdown() throws Exception { + if (brokerService != null) { + brokerService.close(); + brokerService = null; + } + if (pulsar != null) { + pulsar.close(); + pulsar = null; + } + + executor.shutdown(); + if (eventLoopGroup != null) { + eventLoopGroup.shutdownGracefully().get(); + } + store.close(); + sub = null; + } + + @DataProvider(name = "individualAckModes") + public static Object[][] individualAckModes() { + return new Object[][]{ + {Shared}, + {Key_Shared}, + }; + } + + @DataProvider(name = "notIndividualAckModes") + public static Object[][] notIndividualAckModes() { + return new Object[][]{ + {Exclusive}, + {Failover}, + }; + } + + @Test(timeOut = 5000, dataProvider = "individualAckModes") + public void testAckWithIndividualAckMode(CommandSubscribe.SubType subType) throws Exception { + Consumer consumer = new Consumer(sub, subType, "topic-1", consumerId, 0, + "Cons1", 50000, serverCnx, "myrole-1", emptyMap(), false, CommandSubscribe.InitialPosition.Latest, null, 
MessageId.latest); + + CommandAck commandAck = new CommandAck(); + commandAck.setAckType(Cumulative); + commandAck.setConsumerId(consumerId); + commandAck.addMessageId().setEntryId(0L).setLedgerId(1L); + + consumer.messageAcked(commandAck).get(); + verify(sub, never()).acknowledgeMessage(any(), any(), any()); + } + + @Test(timeOut = 5000, dataProvider = "notIndividualAckModes") + public void testAckWithNotIndividualAckMode(CommandSubscribe.SubType subType) throws Exception { + Consumer consumer = new Consumer(sub, subType, "topic-1", consumerId, 0, + "Cons1", 50000, serverCnx, "myrole-1", emptyMap(), false, CommandSubscribe.InitialPosition.Latest, null, MessageId.latest); + + CommandAck commandAck = new CommandAck(); + commandAck.setAckType(Cumulative); + commandAck.setConsumerId(consumerId); + commandAck.addMessageId().setEntryId(0L).setLedgerId(1L); + + consumer.messageAcked(commandAck).get(); + verify(sub, times(1)).acknowledgeMessage(any(), any(), any()); + } + + @Test(timeOut = 5000) + public void testAckWithMoreThanNoneMessageIds() throws Exception { + Consumer consumer = new Consumer(sub, Failover, "topic-1", consumerId, 0, + "Cons1", 50000, serverCnx, "myrole-1", emptyMap(), false, CommandSubscribe.InitialPosition.Latest, null, MessageId.latest); + + CommandAck commandAck = new CommandAck(); + commandAck.setAckType(Cumulative); + commandAck.setConsumerId(consumerId); + commandAck.addMessageId().setEntryId(0L).setLedgerId(1L); + commandAck.addMessageId().setEntryId(0L).setLedgerId(2L); + + consumer.messageAcked(commandAck).get(); + verify(sub, never()).acknowledgeMessage(any(), any(), any()); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/NonPersistentTopicE2ETest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/NonPersistentTopicE2ETest.java index 1543d3e384469..9525944adc74e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/NonPersistentTopicE2ETest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/NonPersistentTopicE2ETest.java @@ -52,6 +52,8 @@ public class NonPersistentTopicE2ETest extends BrokerTestBase { @BeforeMethod(alwaysRun = true) @Override protected void setup() throws Exception { + conf.setBrokerDeleteInactivePartitionedTopicMetadataEnabled(true); + conf.setBrokerDeleteInactiveTopicsFrequencySeconds(1); super.baseSetup(); } @@ -228,5 +230,17 @@ public void testGC() throws Exception { producer2.close(); assertTrue(pulsar.getBrokerService().getTopicReference(topicName).isPresent()); + + // 6. Test for partitioned topic to delete the partitioned metadata + String topicGc = "non-persistent://prop/ns-abc/topic-gc"; + int partitions = 5; + admin.topics().createPartitionedTopic(topicGc, partitions); + Producer producer3 = pulsarClient.newProducer().topic(topicGc).create(); + producer3.close(); + assertTrue(pulsar.getBrokerService().fetchPartitionedTopicMetadataAsync( + TopicName.get(topicGc)).join().partitions == partitions); + runGC(); + assertTrue(pulsar.getBrokerService().fetchPartitionedTopicMetadataAsync( + TopicName.get(topicGc)).join().partitions == 0); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentDispatcherFailoverConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentDispatcherFailoverConsumerTest.java index bc5e05cbf1855..40f58c9a67fbe 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentDispatcherFailoverConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentDispatcherFailoverConsumerTest.java @@ -18,9 +18,11 @@ */ package org.apache.pulsar.broker.service; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.matches; import static org.mockito.ArgumentMatchers.same; +import static 
org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -28,7 +30,9 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.withSettings; import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; import static org.testng.AssertJUnit.assertEquals; import static org.testng.AssertJUnit.assertSame; @@ -46,6 +50,7 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import org.apache.bookkeeper.common.util.OrderedExecutor; @@ -120,9 +125,9 @@ public class PersistentDispatcherFailoverConsumerTest { @BeforeMethod public void setup() throws Exception { executor = OrderedExecutor.newBuilder().numThreads(1).name("persistent-dispatcher-failover-test").build(); - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); svcConfig.setBrokerShutdownTimeoutMs(0L); - pulsar = spy(new PulsarService(svcConfig)); + pulsar = spyWithClassAndConstructorArgs(PulsarService.class, svcConfig); doReturn(svcConfig).when(pulsar).getConfiguration(); mlFactoryMock = mock(ManagedLedgerFactory.class); @@ -139,7 +144,7 @@ public void setup() throws Exception { PulsarResources pulsarResources = new PulsarResources(store, store); doReturn(pulsarResources).when(pulsar).getPulsarResources(); - brokerService = spy(new BrokerService(pulsar, eventLoopGroup)); + brokerService = spyWithClassAndConstructorArgs(BrokerService.class, pulsar, eventLoopGroup); doReturn(brokerService).when(pulsar).getBrokerService(); consumerChanges = new LinkedBlockingQueue<>(); @@ -165,7 
+170,7 @@ public void setup() throws Exception { return null; }).when(channelCtx).writeAndFlush(any(), any()); - serverCnx = spy(new ServerCnx(pulsar)); + serverCnx = spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); doReturn(true).when(serverCnx).isActive(); doReturn(true).when(serverCnx).isWritable(); doReturn(new InetSocketAddress("localhost", 1234)).when(serverCnx).clientAddress(); @@ -174,7 +179,7 @@ public void setup() throws Exception { doReturn(new PulsarCommandSenderImpl(null, serverCnx)) .when(serverCnx).getCommandSender(); - serverCnxWithOldVersion = spy(new ServerCnx(pulsar)); + serverCnxWithOldVersion = spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); doReturn(true).when(serverCnxWithOldVersion).isActive(); doReturn(true).when(serverCnxWithOldVersion).isWritable(); doReturn(new InetSocketAddress("localhost", 1234)) @@ -355,7 +360,8 @@ public void testAddRemoveConsumer() throws Exception { // 4. Verify active consumer assertSame(pdfc.getActiveConsumer().consumerName(), consumer1.consumerName()); // get the notified with who is the leader - change = consumerChanges.take(); + change = consumerChanges.poll(10, TimeUnit.SECONDS); + assertNotNull(change); verifyActiveConsumerChange(change, 1, true); verify(consumer1, times(2)).notifyActiveConsumerChange(same(consumer1)); @@ -367,7 +373,8 @@ public void testAddRemoveConsumer() throws Exception { assertSame(pdfc.getActiveConsumer().consumerName(), consumer1.consumerName()); assertEquals(3, consumers.size()); // get notified with who is the leader - change = consumerChanges.take(); + change = consumerChanges.poll(10, TimeUnit.SECONDS); + assertNotNull(change); verifyActiveConsumerChange(change, 2, false); verify(consumer1, times(2)).notifyActiveConsumerChange(same(consumer1)); verify(consumer2, times(1)).notifyActiveConsumerChange(same(consumer1)); @@ -382,13 +389,17 @@ public void testAddRemoveConsumer() throws Exception { assertEquals(4, consumers.size()); // all consumers will receive 
notifications - change = consumerChanges.take(); + change = consumerChanges.poll(10, TimeUnit.SECONDS); + assertNotNull(change); verifyActiveConsumerChange(change, 0, true); - change = consumerChanges.take(); + change = consumerChanges.poll(10, TimeUnit.SECONDS); + assertNotNull(change); verifyActiveConsumerChange(change, 1, false); - change = consumerChanges.take(); + change = consumerChanges.poll(10, TimeUnit.SECONDS); + assertNotNull(change); verifyActiveConsumerChange(change, 1, false); - change = consumerChanges.take(); + change = consumerChanges.poll(10, TimeUnit.SECONDS); + assertNotNull(change); verifyActiveConsumerChange(change, 2, false); verify(consumer0, times(1)).notifyActiveConsumerChange(same(consumer0)); verify(consumer1, times(2)).notifyActiveConsumerChange(same(consumer1)); @@ -414,9 +425,11 @@ public void testAddRemoveConsumer() throws Exception { assertEquals(2, consumers.size()); // the remaining consumers will receive notifications - change = consumerChanges.take(); + change = consumerChanges.poll(10, TimeUnit.SECONDS); + assertNotNull(change); verifyActiveConsumerChange(change, 1, true); - change = consumerChanges.take(); + change = consumerChanges.poll(10, TimeUnit.SECONDS); + assertNotNull(change); verifyActiveConsumerChange(change, 1, true); // 10. 
Attempt to remove already removed consumer @@ -506,15 +519,15 @@ public void testMultipleDispatcherGetNextConsumerWithDifferentPriorityLevel() th PersistentTopic topic = new PersistentTopic(successTopicName, ledgerMock, brokerService); PersistentDispatcherMultipleConsumers dispatcher = new PersistentDispatcherMultipleConsumers(topic, cursorMock, null); - Consumer consumer1 = createConsumer(0, 2, false, 1); - Consumer consumer2 = createConsumer(0, 2, false, 2); - Consumer consumer3 = createConsumer(0, 2, false, 3); - Consumer consumer4 = createConsumer(1, 2, false, 4); - Consumer consumer5 = createConsumer(1, 1, false, 5); - Consumer consumer6 = createConsumer(1, 2, false, 6); - Consumer consumer7 = createConsumer(2, 1, false, 7); - Consumer consumer8 = createConsumer(2, 1, false, 8); - Consumer consumer9 = createConsumer(2, 1, false, 9); + Consumer consumer1 = createConsumer(topic, 0, 2, false, 1); + Consumer consumer2 = createConsumer(topic, 0, 2, false, 2); + Consumer consumer3 = createConsumer(topic, 0, 2, false, 3); + Consumer consumer4 = createConsumer(topic, 1, 2, false, 4); + Consumer consumer5 = createConsumer(topic, 1, 1, false, 5); + Consumer consumer6 = createConsumer(topic, 1, 2, false, 6); + Consumer consumer7 = createConsumer(topic, 2, 1, false, 7); + Consumer consumer8 = createConsumer(topic, 2, 1, false, 8); + Consumer consumer9 = createConsumer(topic, 2, 1, false, 9); dispatcher.addConsumer(consumer1); dispatcher.addConsumer(consumer2); dispatcher.addConsumer(consumer3); @@ -538,7 +551,7 @@ public void testMultipleDispatcherGetNextConsumerWithDifferentPriorityLevel() th Assert.assertEquals(getNextConsumer(dispatcher), consumer7); Assert.assertEquals(getNextConsumer(dispatcher), consumer8); // in between add upper priority consumer with more permits - Consumer consumer10 = createConsumer(0, 2, false, 10); + Consumer consumer10 = createConsumer(topic, 0, 2, false, 10); dispatcher.addConsumer(consumer10); 
Assert.assertEquals(getNextConsumer(dispatcher), consumer10); Assert.assertEquals(getNextConsumer(dispatcher), consumer10); @@ -550,12 +563,12 @@ public void testMultipleDispatcherGetNextConsumerWithDifferentPriorityLevel() th public void testFewBlockedConsumerSamePriority() throws Exception{ PersistentTopic topic = new PersistentTopic(successTopicName, ledgerMock, brokerService); PersistentDispatcherMultipleConsumers dispatcher = new PersistentDispatcherMultipleConsumers(topic, cursorMock, null); - Consumer consumer1 = createConsumer(0, 2, false, 1); - Consumer consumer2 = createConsumer(0, 2, false, 2); - Consumer consumer3 = createConsumer(0, 2, false, 3); - Consumer consumer4 = createConsumer(0, 2, false, 4); - Consumer consumer5 = createConsumer(0, 1, true, 5); - Consumer consumer6 = createConsumer(0, 2, true, 6); + Consumer consumer1 = createConsumer(topic, 0, 2, false, 1); + Consumer consumer2 = createConsumer(topic, 0, 2, false, 2); + Consumer consumer3 = createConsumer(topic, 0, 2, false, 3); + Consumer consumer4 = createConsumer(topic, 0, 2, false, 4); + Consumer consumer5 = createConsumer(topic, 0, 1, true, 5); + Consumer consumer6 = createConsumer(topic, 0, 2, true, 6); dispatcher.addConsumer(consumer1); dispatcher.addConsumer(consumer2); dispatcher.addConsumer(consumer3); @@ -577,18 +590,18 @@ public void testFewBlockedConsumerSamePriority() throws Exception{ public void testFewBlockedConsumerDifferentPriority() throws Exception { PersistentTopic topic = new PersistentTopic(successTopicName, ledgerMock, brokerService); PersistentDispatcherMultipleConsumers dispatcher = new PersistentDispatcherMultipleConsumers(topic, cursorMock, null); - Consumer consumer1 = createConsumer(0, 2, false, 1); - Consumer consumer2 = createConsumer(0, 2, false, 2); - Consumer consumer3 = createConsumer(0, 2, false, 3); - Consumer consumer4 = createConsumer(0, 2, false, 4); - Consumer consumer5 = createConsumer(0, 1, true, 5); - Consumer consumer6 = createConsumer(0, 2, 
true, 6); - Consumer consumer7 = createConsumer(1, 2, false, 7); - Consumer consumer8 = createConsumer(1, 10, true, 8); - Consumer consumer9 = createConsumer(1, 2, false, 9); - Consumer consumer10 = createConsumer(2, 2, false, 10); - Consumer consumer11 = createConsumer(2, 10, true, 11); - Consumer consumer12 = createConsumer(2, 2, false, 12); + Consumer consumer1 = createConsumer(topic, 0, 2, false, 1); + Consumer consumer2 = createConsumer(topic, 0, 2, false, 2); + Consumer consumer3 = createConsumer(topic, 0, 2, false, 3); + Consumer consumer4 = createConsumer(topic, 0, 2, false, 4); + Consumer consumer5 = createConsumer(topic, 0, 1, true, 5); + Consumer consumer6 = createConsumer(topic, 0, 2, true, 6); + Consumer consumer7 = createConsumer(topic, 1, 2, false, 7); + Consumer consumer8 = createConsumer(topic, 1, 10, true, 8); + Consumer consumer9 = createConsumer(topic, 1, 2, false, 9); + Consumer consumer10 = createConsumer(topic, 2, 2, false, 10); + Consumer consumer11 = createConsumer(topic, 2, 10, true, 11); + Consumer consumer12 = createConsumer(topic, 2, 2, false, 12); dispatcher.addConsumer(consumer1); dispatcher.addConsumer(consumer2); dispatcher.addConsumer(consumer3); @@ -616,8 +629,8 @@ public void testFewBlockedConsumerDifferentPriority() throws Exception { Assert.assertEquals(getNextConsumer(dispatcher), consumer10); Assert.assertEquals(getNextConsumer(dispatcher), consumer12); // add consumer with lower priority again - Consumer consumer13 = createConsumer(0, 2, false, 13); - Consumer consumer14 = createConsumer(0, 2, true, 14); + Consumer consumer13 = createConsumer(topic, 0, 2, false, 13); + Consumer consumer14 = createConsumer(topic, 0, 2, true, 14); dispatcher.addConsumer(consumer13); dispatcher.addConsumer(consumer14); Assert.assertEquals(getNextConsumer(dispatcher), consumer13); @@ -631,13 +644,13 @@ public void testFewBlockedConsumerDifferentPriority() throws Exception { public void testFewBlockedConsumerDifferentPriority2() throws Exception 
{ PersistentTopic topic = new PersistentTopic(successTopicName, ledgerMock, brokerService); PersistentDispatcherMultipleConsumers dispatcher = new PersistentDispatcherMultipleConsumers(topic, cursorMock, null); - Consumer consumer1 = createConsumer(0, 2, true, 1); - Consumer consumer2 = createConsumer(0, 2, true, 2); - Consumer consumer3 = createConsumer(0, 2, true, 3); - Consumer consumer4 = createConsumer(1, 2, false, 4); - Consumer consumer5 = createConsumer(1, 1, false, 5); - Consumer consumer6 = createConsumer(2, 1, false, 6); - Consumer consumer7 = createConsumer(2, 2, true, 7); + Consumer consumer1 = createConsumer(topic, 0, 2, true, 1); + Consumer consumer2 = createConsumer(topic, 0, 2, true, 2); + Consumer consumer3 = createConsumer(topic, 0, 2, true, 3); + Consumer consumer4 = createConsumer(topic, 1, 2, false, 4); + Consumer consumer5 = createConsumer(topic, 1, 1, false, 5); + Consumer consumer6 = createConsumer(topic, 2, 1, false, 6); + Consumer consumer7 = createConsumer(topic, 2, 2, true, 7); dispatcher.addConsumer(consumer1); dispatcher.addConsumer(consumer2); dispatcher.addConsumer(consumer3); @@ -667,9 +680,10 @@ private Consumer getNextConsumer(PersistentDispatcherMultipleConsumers dispatche return null; } - private Consumer createConsumer(int priority, int permit, boolean blocked, int id) throws Exception { + private Consumer createConsumer(PersistentTopic topic, int priority, int permit, boolean blocked, int id) throws Exception { + PersistentSubscription sub = new PersistentSubscription(topic, "sub-1", cursorMock, false); Consumer consumer = - new Consumer(null, SubType.Shared, "test-topic", id, priority, ""+id, 5000, + new Consumer(sub, SubType.Shared, "test-topic", id, priority, ""+id, 5000, serverCnx, "appId", Collections.emptyMap(), false /* read compacted */, InitialPosition.Latest, null, MessageId.latest); try { consumer.flowPermits(permit); diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicConcurrentTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicConcurrentTest.java index ef81ed2575e66..9bd469a955b95 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicConcurrentTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicConcurrentTest.java @@ -18,10 +18,13 @@ */ package org.apache.pulsar.broker.service; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; +import static org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.withSettings; import static org.testng.Assert.assertFalse; import java.lang.reflect.Method; @@ -81,10 +84,10 @@ public class PersistentTopicConcurrentTest extends MockedBookKeeperTestCase { @BeforeMethod public void setup(Method m) throws Exception { super.setUp(m); - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); svcConfig.setBrokerShutdownTimeoutMs(0L); @Cleanup - PulsarService pulsar = spy(new PulsarService(svcConfig)); + PulsarService pulsar = spyWithClassAndConstructorArgs(PulsarService.class, svcConfig); doReturn(svcConfig).when(pulsar).getConfiguration(); @Cleanup(value = "shutdownGracefully") @@ -98,10 +101,10 @@ public void setup(Method m) throws Exception { mlFactoryMock = factory; doReturn(mlFactoryMock).when(pulsar).getManagedLedgerFactory(); - brokerService = spy(new BrokerService(pulsar, eventLoopGroup)); + brokerService = spyWithClassAndConstructorArgs(BrokerService.class, pulsar, eventLoopGroup); doReturn(brokerService).when(pulsar).getBrokerService(); - serverCnx = spy(new ServerCnx(pulsar)); + serverCnx = 
spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); doReturn(true).when(serverCnx).isActive(); NamespaceService nsSvc = mock(NamespaceService.class); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicE2ETest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicE2ETest.java index 42512b0f38bf8..9de664f98ec9d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicE2ETest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicE2ETest.java @@ -96,6 +96,7 @@ public class PersistentTopicE2ETest extends BrokerTestBase { @BeforeMethod(alwaysRun = true) @Override protected void setup() throws Exception { + conf.setBrokerDeleteInactivePartitionedTopicMetadataEnabled(true); super.baseSetup(); } @@ -617,8 +618,27 @@ public void testGC() throws Exception { runGC(); assertFalse(pulsar.getBrokerService().getTopicReference(topicName).isPresent()); - } + // write again, the topic will be available + Producer producer2 = pulsarClient.newProducer().topic(topicName).create(); + producer2.close(); + + assertTrue(pulsar.getBrokerService().getTopicReference(topicName).isPresent()); + + // 6. 
Test for partitioned topic to delete the partitioned metadata + String topicGc = "persistent://prop/ns-abc/topic-gc"; + int partitions = 5; + admin.topics().createPartitionedTopic(topicGc, partitions); + Producer producer3 = pulsarClient.newProducer().topic(topicGc).create(); + producer3.close(); + assertEquals(partitions, pulsar.getBrokerService().fetchPartitionedTopicMetadataAsync( + TopicName.get(topicGc)).join().partitions); + runGC(); + Awaitility.await().untilAsserted(()-> { + assertEquals(pulsar.getBrokerService().fetchPartitionedTopicMetadataAsync( + TopicName.get(topicGc)).join().partitions, 0); + }); + } @Data @ToString @EqualsAndHashCode @@ -1859,4 +1879,17 @@ public void testProducerBusy() throws Exception { assertEquals(admin.topics().getStats(topicName).getPublishers().size(), 1); } + + @Test + public void testHttpLookupWithNotFoundError() throws Exception { + stopBroker(); + isTcpLookup = false; + setup(); + try { + pulsarClient.newProducer().topic("unknownTenant/unknownNamespace/testNamespaceNotFound").create(); + } catch (Exception ex) { + assertTrue(ex instanceof PulsarClientException.NotFoundException); + assertTrue(ex.getMessage().contains("Namespace not found")); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java index 8603404a86a31..3e527e8135a9e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java @@ -18,9 +18,13 @@ */ package org.apache.pulsar.broker.service; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.createMockBookKeeper; import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.createMockZooKeeper; import static 
org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.doAnswer; @@ -33,9 +37,11 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.withSettings; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertSame; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; @@ -123,12 +129,10 @@ import org.apache.pulsar.common.util.Codec; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.compaction.CompactedTopic; +import org.apache.pulsar.compaction.CompactedTopicContext; import org.apache.pulsar.compaction.Compactor; import org.apache.pulsar.metadata.api.MetadataStore; import org.apache.pulsar.metadata.impl.ZKMetadataStore; -import org.apache.pulsar.zookeeper.ZooKeeperCache; -import org.apache.pulsar.zookeeper.ZooKeeperDataCache; -import org.apache.pulsar.broker.admin.AdminResource; import org.apache.zookeeper.ZooKeeper; import org.mockito.ArgumentCaptor; import org.mockito.Mockito; @@ -172,10 +176,10 @@ public class PersistentTopicTest extends MockedBookKeeperTestCase { public void setup() throws Exception { eventLoopGroup = new NioEventLoopGroup(); executor = OrderedExecutor.newBuilder().numThreads(1).build(); - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); svcConfig.setAdvertisedAddress("localhost"); svcConfig.setBrokerShutdownTimeoutMs(0L); - pulsar = spy(new PulsarService(svcConfig)); + pulsar = 
spyWithClassAndConstructorArgs(PulsarService.class, svcConfig); doReturn(svcConfig).when(pulsar).getConfiguration(); doReturn(mock(Compactor.class)).when(pulsar).getCompactor(); @@ -196,18 +200,18 @@ public void setup() throws Exception { doReturn(executor).when(pulsar).getOrderedExecutor(); store = new ZKMetadataStore(mockZk); - PulsarResources pulsarResources = spy(new PulsarResources(store, store)); - NamespaceResources nsr = spy(new NamespaceResources(store, store, 30)); + PulsarResources pulsarResources = spyWithClassAndConstructorArgs(PulsarResources.class, store, store); + NamespaceResources nsr = spyWithClassAndConstructorArgs(NamespaceResources.class, store, store, 30); doReturn(nsr).when(pulsarResources).getNamespaceResources(); doReturn(pulsarResources).when(pulsar).getPulsarResources(); doReturn(store).when(pulsar).getLocalMetadataStore(); doReturn(store).when(pulsar).getConfigurationMetadataStore(); - brokerService = spy(new BrokerService(pulsar, eventLoopGroup)); + brokerService = spyWithClassAndConstructorArgs(BrokerService.class, pulsar, eventLoopGroup); doReturn(brokerService).when(pulsar).getBrokerService(); - serverCnx = spy(new ServerCnx(pulsar)); + serverCnx = spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); doReturn(true).when(serverCnx).isActive(); doReturn(true).when(serverCnx).isWritable(); doReturn(new InetSocketAddress("localhost", 1234)).when(serverCnx).clientAddress(); @@ -225,6 +229,7 @@ public void setup() throws Exception { doReturn(nsSvc).when(pulsar).getNamespaceService(); doReturn(true).when(nsSvc).isServiceUnitOwned(any()); doReturn(true).when(nsSvc).isServiceUnitActive(any()); + doReturn(CompletableFuture.completedFuture(true)).when(nsSvc).isServiceUnitActiveAsync(any()); doReturn(CompletableFuture.completedFuture(true)).when(nsSvc).checkTopicOwnership(any()); setupMLAsyncCallbackMocks(); @@ -349,7 +354,8 @@ public void setMetadataFromEntryData(ByteBuf entryData) { @Test public void 
testDispatcherMultiConsumerReadFailed() throws Exception { - PersistentTopic topic = spy(new PersistentTopic(successTopicName, ledgerMock, brokerService)); + PersistentTopic topic = spyWithClassAndConstructorArgs(PersistentTopic.class, + successTopicName, ledgerMock, brokerService); ManagedCursor cursor = mock(ManagedCursor.class); when(cursor.getName()).thenReturn("cursor"); PersistentDispatcherMultipleConsumers dispatcher = new PersistentDispatcherMultipleConsumers(topic, cursor, null); @@ -359,7 +365,8 @@ public void testDispatcherMultiConsumerReadFailed() throws Exception { @Test public void testDispatcherSingleConsumerReadFailed() throws Exception { - PersistentTopic topic = spy(new PersistentTopic(successTopicName, ledgerMock, brokerService)); + PersistentTopic topic = + spyWithClassAndConstructorArgs(PersistentTopic.class, successTopicName, ledgerMock, brokerService); ManagedCursor cursor = mock(ManagedCursor.class); when(cursor.getName()).thenReturn("cursor"); PersistentDispatcherSingleActiveConsumer dispatcher = new PersistentDispatcherSingleActiveConsumer(cursor, @@ -440,11 +447,20 @@ public void testAddRemoveProducer() throws Exception { // OK } - // 4. simple remove producer + // 4. Try to remove with unequal producer + Producer producerCopy = new Producer(topic, serverCnx, 1 /* producer id */, "prod-name", + role, false, null, SchemaVersion.Latest, 0, false, + ProducerAccessMode.Shared, Optional.empty()); + topic.removeProducer(producerCopy); + // Expect producer to be in map + assertEquals(topic.getProducers().size(), 1); + assertSame(topic.getProducers().get(producer.getProducerName()), producer); + + // 5. simple remove producer topic.removeProducer(producer); assertEquals(topic.getProducers().size(), 0); - // 5. duplicate remove + // 6. 
duplicate remove topic.removeProducer(producer); /* noop */ } @@ -547,7 +563,7 @@ private void testMaxProducers() throws Exception { @Test public void testMaxProducersForBroker() throws Exception { // set max clients - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); doReturn(2).when(svcConfig).getMaxProducersPerTopic(); doReturn(svcConfig).when(pulsar).getConfiguration(); testMaxProducers(); @@ -555,7 +571,7 @@ public void testMaxProducersForBroker() throws Exception { @Test public void testMaxProducersForNamespace() throws Exception { - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); doReturn(svcConfig).when(pulsar).getConfiguration(); // set max clients Policies policies = new Policies(); @@ -575,7 +591,9 @@ private Producer getMockedProducerWithSpecificAddress(Topic topic, long producer final String producerNameBase = "producer"; final String role = "appid1"; - ServerCnx cnx = spy(new ServerCnx(pulsar)); + ServerCnx cnx = mock(ServerCnx.class, withSettings() + .useConstructor(pulsar) + .defaultAnswer(CALLS_REAL_METHODS)); doReturn(true).when(cnx).isActive(); doReturn(true).when(cnx).isWritable(); doReturn(new InetSocketAddress(address, 1234)).when(cnx).clientAddress(); @@ -589,7 +607,7 @@ private Producer getMockedProducerWithSpecificAddress(Topic topic, long producer @Test public void testMaxSameAddressProducers() throws Exception { // set max clients - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); doReturn(2).when(svcConfig).getMaxSameAddressProducersPerTopic(); doReturn(svcConfig).when(pulsar).getConfiguration(); @@ -822,7 +840,11 @@ private void testMaxConsumersShared() throws Exception { addConsumerToSubscription.setAccessible(true); // for count consumers on topic - ConcurrentOpenHashMap subscriptions 
= new ConcurrentOpenHashMap<>(16, 1); + ConcurrentOpenHashMap subscriptions = + ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); subscriptions.put("sub-1", sub); subscriptions.put("sub-2", sub2); Field field = topic.getClass().getDeclaredField("subscriptions"); @@ -882,7 +904,7 @@ private void testMaxConsumersShared() throws Exception { @Test public void testMaxConsumersSharedForBroker() throws Exception { // set max clients - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); doReturn(2).when(svcConfig).getMaxConsumersPerSubscription(); doReturn(3).when(svcConfig).getMaxConsumersPerTopic(); doReturn(svcConfig).when(pulsar).getConfiguration(); @@ -892,7 +914,7 @@ public void testMaxConsumersSharedForBroker() throws Exception { @Test public void testMaxConsumersSharedForNamespace() throws Exception { - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); doReturn(svcConfig).when(pulsar).getConfiguration(); // set max clients @@ -921,7 +943,11 @@ private void testMaxConsumersFailover() throws Exception { addConsumerToSubscription.setAccessible(true); // for count consumers on topic - ConcurrentOpenHashMap subscriptions = new ConcurrentOpenHashMap<>(16, 1); + ConcurrentOpenHashMap subscriptions = + ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); subscriptions.put("sub-1", sub); subscriptions.put("sub-2", sub2); Field field = topic.getClass().getDeclaredField("subscriptions"); @@ -981,7 +1007,7 @@ private void testMaxConsumersFailover() throws Exception { @Test public void testMaxConsumersFailoverForBroker() throws Exception { // set max clients - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); 
doReturn(2).when(svcConfig).getMaxConsumersPerSubscription(); doReturn(3).when(svcConfig).getMaxConsumersPerTopic(); doReturn(svcConfig).when(pulsar).getConfiguration(); @@ -991,7 +1017,7 @@ public void testMaxConsumersFailoverForBroker() throws Exception { @Test public void testMaxConsumersFailoverForNamespace() throws Exception { - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); doReturn(svcConfig).when(pulsar).getConfiguration(); // set max clients @@ -1013,7 +1039,7 @@ private Consumer getMockedConsumerWithSpecificAddress(Topic topic, Subscription final String consumerNameBase = "consumer"; final String role = "appid1"; - ServerCnx cnx = spy(new ServerCnx(pulsar)); + ServerCnx cnx = spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); doReturn(true).when(cnx).isActive(); doReturn(true).when(cnx).isWritable(); doReturn(new InetSocketAddress(address, 1234)).when(cnx).clientAddress(); @@ -1027,7 +1053,7 @@ private Consumer getMockedConsumerWithSpecificAddress(Topic topic, Subscription @Test public void testMaxSameAddressConsumers() throws Exception { // set max clients - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); doReturn(2).when(svcConfig).getMaxSameAddressConsumersPerTopic(); doReturn(svcConfig).when(pulsar).getConfiguration(); @@ -1045,7 +1071,11 @@ public void testMaxSameAddressConsumers() throws Exception { addConsumerToSubscription.setAccessible(true); // for count consumers on topic - ConcurrentOpenHashMap subscriptions = new ConcurrentOpenHashMap<>(16, 1); + ConcurrentOpenHashMap subscriptions = + ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); subscriptions.put("sub1", sub1); subscriptions.put("sub2", sub2); Field field = topic.getClass().getDeclaredField("subscriptions"); @@ -1768,7 +1798,10 @@ public void 
testAtomicReplicationRemoval() throws Exception { doReturn(remoteCluster).when(cursor).getName(); brokerService.getReplicationClients().put(remoteCluster, client); PersistentReplicator replicator = spy( - new PersistentReplicator(topic, cursor, localCluster, remoteCluster, brokerService)); + new PersistentReplicator(topic, cursor, localCluster, remoteCluster, brokerService, + (PulsarClientImpl) brokerService.getReplicationClient(remoteCluster, + brokerService.pulsar().getPulsarResources().getClusterResources() + .getCluster(remoteCluster)))); replicatorMap.put(remoteReplicatorName, replicator); // step-1 remove replicator : it will disconnect the producer but it will wait for callback to be completed @@ -1813,7 +1846,10 @@ public void testClosingReplicationProducerTwice() throws Exception { ManagedCursor cursor = mock(ManagedCursorImpl.class); doReturn(remoteCluster).when(cursor).getName(); brokerService.getReplicationClients().put(remoteCluster, client); - PersistentReplicator replicator = new PersistentReplicator(topic, cursor, localCluster, remoteCluster, brokerService); + PersistentReplicator replicator = new PersistentReplicator(topic, cursor, localCluster, remoteCluster, + brokerService, (PulsarClientImpl) brokerService.getReplicationClient(remoteCluster, + brokerService.pulsar().getPulsarResources().getClusterResources() + .getCluster(remoteCluster))); // PersistentReplicator constructor calls startProducer() verify(clientImpl) @@ -1834,6 +1870,8 @@ public void testClosingReplicationProducerTwice() throws Exception { public void testCompactorSubscription() throws Exception { PersistentTopic topic = new PersistentTopic(successTopicName, ledgerMock, brokerService); CompactedTopic compactedTopic = mock(CompactedTopic.class); + when(compactedTopic.newCompactedLedger(any(Position.class), anyLong())) + .thenReturn(CompletableFuture.completedFuture(mock(CompactedTopicContext.class))); PersistentSubscription sub = new CompactorSubscription(topic, compactedTopic, 
Compactor.COMPACTION_SUBSCRIPTION, cursorMock); @@ -1856,6 +1894,8 @@ public void testCompactorSubscriptionUpdatedOnInit() throws Exception { PersistentTopic topic = new PersistentTopic(successTopicName, ledgerMock, brokerService); CompactedTopic compactedTopic = mock(CompactedTopic.class); + when(compactedTopic.newCompactedLedger(any(Position.class), anyLong())) + .thenReturn(CompletableFuture.completedFuture(null)); new CompactorSubscription(topic, compactedTopic, Compactor.COMPACTION_SUBSCRIPTION, cursorMock); verify(compactedTopic, Mockito.times(1)).newCompactedLedger(position, ledgerId); } @@ -2060,15 +2100,25 @@ public void addFailed(ManagedLedgerException exception, Object ctx) { public void testCheckInactiveSubscriptions() throws Exception { PersistentTopic topic = new PersistentTopic(successTopicName, ledgerMock, brokerService); - ConcurrentOpenHashMap subscriptions = new ConcurrentOpenHashMap<>(16, 1); + ConcurrentOpenHashMap subscriptions = + ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); // This subscription is connected by consumer. - PersistentSubscription nonDeletableSubscription1 = spy(new PersistentSubscription(topic, "nonDeletableSubscription1", cursorMock, false)); + PersistentSubscription nonDeletableSubscription1 = + spyWithClassAndConstructorArgs(PersistentSubscription.class, topic, + "nonDeletableSubscription1", cursorMock, false); subscriptions.put(nonDeletableSubscription1.getName(), nonDeletableSubscription1); // This subscription is not connected by consumer. - PersistentSubscription deletableSubscription1 = spy(new PersistentSubscription(topic, "deletableSubscription1", cursorMock, false)); + PersistentSubscription deletableSubscription1 = + spyWithClassAndConstructorArgs(PersistentSubscription.class, + topic, "deletableSubscription1", cursorMock, false); subscriptions.put(deletableSubscription1.getName(), deletableSubscription1); // This subscription is replicated. 
- PersistentSubscription nonDeletableSubscription2 = spy(new PersistentSubscription(topic, "nonDeletableSubscription2", cursorMock, true)); + PersistentSubscription nonDeletableSubscription2 = + spyWithClassAndConstructorArgs(PersistentSubscription.class, topic, + "nonDeletableSubscription2", cursorMock, true); subscriptions.put(nonDeletableSubscription2.getName(), nonDeletableSubscription2); Field field = topic.getClass().getDeclaredField("subscriptions"); @@ -2087,7 +2137,7 @@ public void testCheckInactiveSubscriptions() throws Exception { NamespaceName ns = TopicName.get(successTopicName).getNamespaceObject(); doReturn(Optional.of(new Policies())).when(nsr).getPolicies(ns); - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); doReturn(5).when(svcConfig).getSubscriptionExpirationTimeMinutes(); doReturn(svcConfig).when(pulsar).getConfiguration(); @@ -2102,7 +2152,7 @@ public void testCheckInactiveSubscriptions() throws Exception { @Test public void testTopicFencingTimeout() throws Exception { - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); doReturn(svcConfig).when(pulsar).getConfiguration(); PersistentTopic topic = new PersistentTopic(successTopicName, ledgerMock, brokerService); @@ -2137,6 +2187,28 @@ public void testTopicFencingTimeout() throws Exception { assertTrue((boolean) isClosingOrDeletingField.get(topic)); } + @Test + public void testTopicCloseFencingTimeout() throws Exception { + pulsar.getConfiguration().setTopicFencingTimeoutSeconds(10); + Method fence = PersistentTopic.class.getDeclaredMethod("fence"); + fence.setAccessible(true); + Field fencedTopicMonitoringTaskField = PersistentTopic.class.getDeclaredField("fencedTopicMonitoringTask"); + fencedTopicMonitoringTaskField.setAccessible(true); + + // create topic + PersistentTopic topic = (PersistentTopic) 
brokerService.getOrCreateTopic(successTopicName).get(); + + // fence topic to init fencedTopicMonitoringTask + fence.invoke(topic); + + // close topic + topic.close().get(); + assertFalse(brokerService.getTopicReference(successTopicName).isPresent()); + ScheduledFuture fencedTopicMonitoringTask = (ScheduledFuture) fencedTopicMonitoringTaskField.get(topic); + assertTrue(fencedTopicMonitoringTask.isDone()); + assertTrue(fencedTopicMonitoringTask.isCancelled()); + } + @Test public void testGetDurableSubscription() throws Exception { ManagedLedger mockLedger = mock(ManagedLedger.class); @@ -2150,7 +2222,7 @@ public void testGetDurableSubscription() throws Exception { doAnswer((Answer) invocationOnMock -> { ((AsyncCallbacks.ResetCursorCallback) invocationOnMock.getArguments()[1]).resetComplete(null); return null; - }).when(mockCursor).asyncResetCursor(any(), any()); + }).when(mockCursor).asyncResetCursor(any(), anyBoolean(), any()); doAnswer((Answer) invocationOnMock -> { ((DeleteCursorCallback) invocationOnMock.getArguments()[1]).deleteCursorComplete(null); return null; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorRemoveClusterTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorRemoveClusterTest.java index 701ab47fa7f69..65e90966ea541 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorRemoveClusterTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorRemoveClusterTest.java @@ -75,7 +75,9 @@ public void testRemoveClusterFromNamespace() throws Exception { admin1.namespaces().createNamespace("pulsar1/ns1", Sets.newHashSet("r1", "r2", "r3")); - PulsarClient repClient1 = pulsar1.getBrokerService().getReplicationClient("r3"); + PulsarClient repClient1 = pulsar1.getBrokerService().getReplicationClient("r3", + pulsar1.getBrokerService().pulsar().getPulsarResources().getClusterResources() + .getCluster("r3")); Assert.assertNotNull(repClient1); 
Assert.assertFalse(repClient1.isClosed()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java index 9b4fcc87aa186..d9ac04c3fc2dd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ReplicatorTest.java @@ -39,6 +39,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Optional; +import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import java.util.UUID; @@ -68,6 +69,7 @@ import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; @@ -76,6 +78,8 @@ import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.api.TypedMessageBuilder; +import org.apache.pulsar.client.api.schema.GenericRecord; +import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.client.impl.ProducerImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.conf.ProducerConfigurationData; @@ -87,7 +91,6 @@ import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.ReplicatorStats; import org.apache.pulsar.common.policies.data.RetentionPolicies; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.schema.Schemas; @@ -262,7 +265,9 @@ public void testConcurrentReplicator() throws Exception { 
.getOrCreateTopic(topicName.toString()).get(); PulsarClientImpl pulsarClient = spy((PulsarClientImpl) pulsar1.getBrokerService() - .getReplicationClient("r3")); + .getReplicationClient("r3", + pulsar1.getBrokerService().pulsar().getPulsarResources().getClusterResources() + .getCluster("r3"))); final Method startRepl = PersistentTopic.class.getDeclaredMethod("startReplicator", String.class); startRepl.setAccessible(true); @@ -376,8 +381,11 @@ public void testReplication(String namespace) throws Exception { consumer3.receive(1); } - @Test + @Test(invocationCount = 5) public void testReplicationWithSchema() throws Exception { + config1.setBrokerDeduplicationEnabled(true); + config2.setBrokerDeduplicationEnabled(true); + config3.setBrokerDeduplicationEnabled(true); PulsarClient client1 = pulsar1.getClient(); PulsarClient client2 = pulsar2.getClient(); PulsarClient client3 = pulsar3.getClient(); @@ -389,47 +397,86 @@ public void testReplicationWithSchema() throws Exception { @Cleanup Producer producer1 = client1.newProducer(Schema.AVRO(Schemas.PersonOne.class)) .topic(topic.toString()) + .enableBatching(false) .create(); + @Cleanup - Producer producer2 = client2.newProducer(Schema.AVRO(Schemas.PersonOne.class)) - .topic(topic.toString()) - .create(); - @Cleanup - Producer producer3 = client3.newProducer(Schema.AVRO(Schemas.PersonOne.class)) + Producer producer2 = client1.newProducer(Schema.AVRO(Schemas.PersonThree.class)) .topic(topic.toString()) + .enableBatching(false) .create(); - List> producers = Lists.newArrayList(producer1, producer2, producer3); + admin1.topics().createSubscription(topic.toString(), subName, MessageId.earliest); + admin2.topics().createSubscription(topic.toString(), subName, MessageId.earliest); + admin3.topics().createSubscription(topic.toString(), subName, MessageId.earliest); + + final int totalMessages = 1000; + + for (int i = 0; i < totalMessages / 2; i++) { + producer1.sendAsync(new Schemas.PersonOne(i)); + } + + for (int i = 500; i < 
totalMessages; i++) { + producer2.sendAsync(new Schemas.PersonThree(i, "name-" + i)); + } + + Awaitility.await().untilAsserted(() -> { + assertTrue(admin1.topics().getInternalStats(topic.toString()).schemaLedgers.size() > 0); + assertTrue(admin2.topics().getInternalStats(topic.toString()).schemaLedgers.size() > 0); + assertTrue(admin3.topics().getInternalStats(topic.toString()).schemaLedgers.size() > 0); + }); @Cleanup - Consumer consumer1 = client1.newConsumer(Schema.AVRO(Schemas.PersonOne.class)) + Consumer consumer1 = client1.newConsumer(Schema.AUTO_CONSUME()) .topic(topic.toString()) .subscriptionName(subName) .subscribe(); @Cleanup - Consumer consumer2 = client2.newConsumer(Schema.AVRO(Schemas.PersonOne.class)) + Consumer consumer2 = client2.newConsumer(Schema.AUTO_CONSUME()) .topic(topic.toString()) .subscriptionName(subName) .subscribe(); @Cleanup - Consumer consumer3 = client3.newConsumer(Schema.AVRO(Schemas.PersonOne.class)) + Consumer consumer3 = client3.newConsumer(Schema.AUTO_CONSUME()) .topic(topic.toString()) .subscriptionName(subName) .subscribe(); - for (int i = 0; i < 3; i++) { - producers.get(i).send(new Schemas.PersonOne(i)); - Message msg1 = consumer1.receive(); - Message msg2 = consumer2.receive(); - Message msg3 = consumer3.receive(); + int lastId = -1; + for (int i = 0; i < totalMessages; i++) { + Message msg1 = consumer1.receive(); + Message msg2 = consumer2.receive(); + Message msg3 = consumer3.receive(); assertTrue(msg1 != null && msg2 != null && msg3 != null); - assertTrue(msg1.getValue().equals(msg2.getValue()) && msg2.getValue().equals(msg3.getValue())); + GenericRecord record1 = msg1.getValue(); + GenericRecord record2 = msg2.getValue(); + GenericRecord record3 = msg3.getValue(); + int id1 = (int) record1.getField("id"); + int id2 = (int) record2.getField("id"); + int id3 = (int) record3.getField("id"); + log.info("Received ids, id1: {}, id2: {}, id3: {}, lastId: {}", id1, id2, id3, lastId); + assertTrue(id1 == id2 && id2 == id3); + 
assertTrue(id1 > lastId); + lastId = id1; consumer1.acknowledge(msg1); consumer2.acknowledge(msg2); consumer3.acknowledge(msg3); } + + @Cleanup + Producer producerBytes = client1.newProducer() + .topic(topic.toString()) + .enableBatching(false) + .create(); + + byte[] data = "Bytes".getBytes(); + producerBytes.send(data); + + assertEquals(consumer1.receive().getValue().getNativeObject(), data); + assertEquals(consumer2.receive().getValue().getNativeObject(), data); + assertEquals(consumer3.receive().getValue().getNativeObject(), data); } @Test @@ -713,6 +760,54 @@ public void testReplicatorProducerClosing() throws Exception { assertNull(producer); } + @Test(priority = 5, timeOut = 30000) + public void testReplicatorProducerName() throws Exception { + log.info("--- Starting ReplicatorTest::testReplicatorProducerName ---"); + final String topicName = BrokerTestUtil.newUniqueName("persistent://pulsar/ns/testReplicatorProducerName"); + final TopicName dest = TopicName.get(topicName); + + @Cleanup + MessageProducer producer1 = new MessageProducer(url1, dest); + + Awaitility.await().untilAsserted(() -> { + assertTrue(pulsar2.getBrokerService().getTopicReference(topicName).isPresent()); + }); + Optional topic = pulsar2.getBrokerService().getTopicReference(topicName); + assertTrue(topic.isPresent()); + Awaitility.await().untilAsserted(() -> { + Set remoteClusters = topic.get().getProducers().values().stream() + .map(org.apache.pulsar.broker.service.Producer::getRemoteCluster) + .collect(Collectors.toSet()); + assertTrue(remoteClusters.contains("r1")); + }); + } + + @Test(priority = 5, timeOut = 30000) + public void testReplicatorProducerNameWithUserDefinedReplicatorPrefix() throws Exception { + log.info("--- Starting ReplicatorTest::testReplicatorProducerNameWithUserDefinedReplicatorPrefix ---"); + final String topicName = BrokerTestUtil.newUniqueName( + "persistent://pulsar/ns/testReplicatorProducerNameWithUserDefinedReplicatorPrefix"); + final TopicName dest = 
TopicName.get(topicName); + + pulsar1.getConfiguration().setReplicatorPrefix("user-defined-prefix"); + pulsar2.getConfiguration().setReplicatorPrefix("user-defined-prefix"); + pulsar3.getConfiguration().setReplicatorPrefix("user-defined-prefix"); + + @Cleanup + MessageProducer producer1 = new MessageProducer(url1, dest); + + Awaitility.await().untilAsserted(()->{ + assertTrue(pulsar2.getBrokerService().getTopicReference(topicName).isPresent()); + }); + Optional topic = pulsar2.getBrokerService().getTopicReference(topicName); + assertTrue(topic.isPresent()); + Set remoteClusters = topic.get().getProducers().values().stream() + .map(org.apache.pulsar.broker.service.Producer::getRemoteCluster) + .collect(Collectors.toSet()); + assertTrue(remoteClusters.contains("r1")); + } + + /** * Issue #199 * @@ -1261,6 +1356,27 @@ public void testDoNotReplicateSystemTopic() throws Exception { }); } + @Test + public void testLookupAnotherCluster() throws Exception { + log.info("--- Starting ReplicatorTest::testLookupAnotherCluster ---"); + + String namespace = "pulsar/r2/cross-cluster-ns"; + admin1.namespaces().createNamespace(namespace); + final TopicName topicName = TopicName + .get("persistent://" + namespace + "/topic"); + + @Cleanup + PulsarClient client1 = PulsarClient.builder() + .serviceUrl(url1.toString()).statsInterval(0, TimeUnit.SECONDS) + .build(); + Producer producer = client1.newProducer().topic(topicName.toString()) + .enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); + + producer.close(); + } + private void checkListContainExpectedTopic(PulsarAdmin admin, String namespace, List expectedTopicList) { // wait non-partitioned topics replicators created finished final List list = new ArrayList<>(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxAuthorizationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxAuthorizationTest.java new file mode 100644 index 
0000000000000..0d4580d044cd4 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxAuthorizationTest.java @@ -0,0 +1,433 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.pulsar.broker.service; + +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; +import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.createMockBookKeeper; +import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.createMockZooKeeper; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; +import com.google.common.collect.Sets; +import io.jsonwebtoken.Jwts; +import io.jsonwebtoken.SignatureAlgorithm; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import 
io.netty.channel.ChannelPipeline; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import javax.crypto.SecretKey; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Collections; +import java.util.Optional; +import java.util.Properties; +import java.util.concurrent.CompletableFuture; +import org.apache.bookkeeper.common.util.OrderedExecutor; +import org.apache.bookkeeper.mledger.ManagedLedgerFactory; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.authentication.AuthenticationDataSource; +import org.apache.pulsar.broker.authentication.AuthenticationDataSubscription; +import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; +import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; +import org.apache.pulsar.broker.authorization.AuthorizationService; +import org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider; +import org.apache.pulsar.broker.intercept.BrokerInterceptor; +import org.apache.pulsar.broker.resources.NamespaceResources; +import org.apache.pulsar.broker.resources.PulsarResources; +import org.apache.pulsar.broker.resources.TenantResources; +import org.apache.pulsar.broker.service.schema.DefaultSchemaRegistryService; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; +import org.apache.pulsar.common.api.proto.CommandConnect; +import org.apache.pulsar.common.api.proto.CommandLookupTopic; +import org.apache.pulsar.common.api.proto.CommandProducer; +import org.apache.pulsar.common.api.proto.CommandSubscribe; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.TenantInfo; +import org.apache.pulsar.common.policies.data.TopicOperation; +import 
org.apache.pulsar.metadata.api.MetadataStore; +import org.apache.pulsar.metadata.impl.ZKMetadataStore; +import org.apache.zookeeper.ZooKeeper; +import org.mockito.ArgumentMatcher; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class ServerCnxAuthorizationTest { + private final SecretKey SECRET_KEY = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + private final String CLIENT_PRINCIPAL = "client"; + private final String PROXY_PRINCIPAL = "proxy"; + private final String CLIENT_TOKEN = Jwts.builder().setSubject(CLIENT_PRINCIPAL).signWith(SECRET_KEY).compact(); + private final String PROXY_TOKEN = Jwts.builder().setSubject(PROXY_PRINCIPAL).signWith(SECRET_KEY).compact(); + + private PulsarService pulsar; + private PulsarResources pulsarResources; + private BrokerService brokerService; + private ServiceConfiguration svcConfig; + + @BeforeMethod(alwaysRun = true) + public void beforeMethod() throws Exception { + EventLoopGroup eventLoopGroup = new NioEventLoopGroup(); + svcConfig = spy(ServiceConfiguration.class); + svcConfig.setKeepAliveIntervalSeconds(0); + svcConfig.setBrokerShutdownTimeoutMs(0L); + svcConfig.setLoadBalancerOverrideBrokerNicSpeedGbps(1.0d); + svcConfig.setClusterName("pulsar-cluster"); + svcConfig.setSuperUserRoles(Collections.singleton(PROXY_PRINCIPAL)); + svcConfig.setAuthenticationEnabled(true); + svcConfig.setAuthenticationProviders(Sets.newHashSet(AuthenticationProviderToken.class.getName())); + svcConfig.setAuthorizationEnabled(true); + svcConfig.setAuthorizationProvider(PulsarAuthorizationProvider.class.getName()); + Properties properties = new Properties(); + properties.setProperty("tokenSecretKey", "data:;base64," + + Base64.getEncoder().encodeToString(SECRET_KEY.getEncoded())); + svcConfig.setProperties(properties); + + pulsar = spyWithClassAndConstructorArgs(PulsarService.class, svcConfig); + doReturn(new 
DefaultSchemaRegistryService()).when(pulsar).getSchemaRegistryService(); + + doReturn(svcConfig).when(pulsar).getConfiguration(); + doReturn(mock(PulsarResources.class)).when(pulsar).getPulsarResources(); + + ManagedLedgerFactory mlFactoryMock = mock(ManagedLedgerFactory.class); + doReturn(mlFactoryMock).when(pulsar).getManagedLedgerFactory(); + + ZooKeeper mockZk = createMockZooKeeper(); + OrderedExecutor executor = OrderedExecutor.newBuilder().numThreads(1).build(); + doReturn(createMockBookKeeper(executor)) + .when(pulsar).getBookKeeperClient(); + + MetadataStore store = new ZKMetadataStore(mockZk); + + doReturn(store).when(pulsar).getLocalMetadataStore(); + doReturn(store).when(pulsar).getConfigurationMetadataStore(); + + pulsarResources = spyWithClassAndConstructorArgs(PulsarResources.class, store, store); + doReturn(pulsarResources).when(pulsar).getPulsarResources(); + NamespaceResources namespaceResources = + spyWithClassAndConstructorArgs(NamespaceResources.class, store, store, 30); + doReturn(namespaceResources).when(pulsarResources).getNamespaceResources(); + + TenantResources tenantResources = spyWithClassAndConstructorArgs(TenantResources.class, store, 30); + doReturn(tenantResources).when(pulsarResources).getTenantResources(); + + doReturn(CompletableFuture.completedFuture(Optional.of(TenantInfo.builder().build()))).when(tenantResources) + .getTenantAsync("public"); + + brokerService = spyWithClassAndConstructorArgs(BrokerService.class, pulsar, eventLoopGroup); + BrokerInterceptor interceptor = mock(BrokerInterceptor.class); + doReturn(interceptor).when(brokerService).getInterceptor(); + doReturn(brokerService).when(pulsar).getBrokerService(); + doReturn(executor).when(pulsar).getOrderedExecutor(); + } + + @Test + public void testVerifyOriginalPrincipalWithAuthDataForwardedFromProxy() throws Exception { + doReturn(true).when(svcConfig).isAuthenticateOriginalAuthData(); + + ServerCnx serverCnx = spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); + 
ChannelHandlerContext channelHandlerContext = mock(ChannelHandlerContext.class); + Channel channel = mock(Channel.class); + ChannelPipeline channelPipeline = mock(ChannelPipeline.class); + doReturn(channelPipeline).when(channel).pipeline(); + doReturn(null).when(channelPipeline).get(PulsarChannelInitializer.TLS_HANDLER); + + SocketAddress socketAddress = new InetSocketAddress(0); + doReturn(socketAddress).when(channel).remoteAddress(); + doReturn(channel).when(channelHandlerContext).channel(); + channelHandlerContext.channel().remoteAddress(); + serverCnx.channelActive(channelHandlerContext); + + // connect + AuthenticationToken clientAuthenticationToken = new AuthenticationToken(CLIENT_TOKEN); + AuthenticationToken proxyAuthenticationToken = new AuthenticationToken(PROXY_TOKEN); + CommandConnect connect = new CommandConnect(); + connect.setAuthMethodName(proxyAuthenticationToken.getAuthMethodName()); + connect.setAuthData(proxyAuthenticationToken.getAuthData().getCommandData().getBytes(StandardCharsets.UTF_8)); + connect.setClientVersion("test"); + connect.setProtocolVersion(1); + connect.setOriginalPrincipal(CLIENT_PRINCIPAL); + connect.setOriginalAuthData(clientAuthenticationToken.getAuthData().getCommandData()); + connect.setOriginalAuthMethod(clientAuthenticationToken.getAuthMethodName()); + + serverCnx.handleConnect(connect); + assertEquals(serverCnx.getOriginalAuthData().getCommandData(), + clientAuthenticationToken.getAuthData().getCommandData()); + assertEquals(serverCnx.getOriginalAuthState().getAuthRole(), CLIENT_PRINCIPAL); + assertEquals(serverCnx.getOriginalPrincipal(), CLIENT_PRINCIPAL); + assertEquals(serverCnx.getAuthData().getCommandData(), + proxyAuthenticationToken.getAuthData().getCommandData()); + assertEquals(serverCnx.getAuthRole(), PROXY_PRINCIPAL); + assertEquals(serverCnx.getAuthState().getAuthRole(), PROXY_PRINCIPAL); + + AuthorizationService authorizationService = + spyWithClassAndConstructorArgs(AuthorizationService.class, svcConfig, 
pulsarResources); + doReturn(authorizationService).when(brokerService).getAuthorizationService(); + + // lookup + CommandLookupTopic commandLookupTopic = new CommandLookupTopic(); + TopicName topicName = TopicName.get("persistent://public/default/test-topic"); + commandLookupTopic.setTopic(topicName.toString()); + commandLookupTopic.setRequestId(1); + serverCnx.handleLookup(commandLookupTopic); + verify(authorizationService, times(1)).allowTopicOperationAsync(topicName, TopicOperation.LOOKUP, + CLIENT_PRINCIPAL, + serverCnx.getOriginalAuthData()); + verify(authorizationService, times(1)).allowTopicOperationAsync(topicName, TopicOperation.LOOKUP, + PROXY_PRINCIPAL, + serverCnx.getAuthData()); + + // producer + CommandProducer commandProducer = new CommandProducer(); + commandProducer.setRequestId(1); + commandProducer.setProducerId(1); + commandProducer.setProducerName("test-producer"); + commandProducer.setTopic(topicName.toString()); + serverCnx.handleProducer(commandProducer); + verify(authorizationService, times(1)).allowTopicOperationAsync(topicName, TopicOperation.PRODUCE, + CLIENT_PRINCIPAL, + serverCnx.getOriginalAuthData()); + verify(authorizationService, times(1)).allowTopicOperationAsync(topicName, TopicOperation.LOOKUP, + PROXY_PRINCIPAL, + serverCnx.getAuthData()); + + // consumer + CommandSubscribe commandSubscribe = new CommandSubscribe(); + commandSubscribe.setTopic(topicName.toString()); + commandSubscribe.setRequestId(1); + commandSubscribe.setConsumerId(1); + final String subscriptionName = "test-subscribe"; + commandSubscribe.setSubscription("test-subscribe"); + commandSubscribe.setSubType(CommandSubscribe.SubType.Shared); + serverCnx.handleSubscribe(commandSubscribe); + + verify(authorizationService, times(1)).allowTopicOperationAsync( + eq(topicName), eq(TopicOperation.CONSUME), + eq(CLIENT_PRINCIPAL), argThat(arg -> { + assertTrue(arg instanceof AuthenticationDataSubscription); + try { + assertEquals(arg.getCommandData(), 
clientAuthenticationToken.getAuthData().getCommandData()); + } catch (PulsarClientException e) { + fail(e.getMessage()); + } + assertEquals(arg.getSubscription(), subscriptionName); + return true; + })); + verify(authorizationService, times(1)).allowTopicOperationAsync( + eq(topicName), eq(TopicOperation.CONSUME), + eq(PROXY_PRINCIPAL), argThat(arg -> { + assertTrue(arg instanceof AuthenticationDataSubscription); + try { + assertEquals(arg.getCommandData(), proxyAuthenticationToken.getAuthData().getCommandData()); + } catch (PulsarClientException e) { + fail(e.getMessage()); + } + assertEquals(arg.getSubscription(), subscriptionName); + return true; + })); + } + + @Test + public void testVerifyOriginalPrincipalWithoutAuthDataForwardedFromProxy() throws Exception { + doReturn(false).when(svcConfig).isAuthenticateOriginalAuthData(); + + ServerCnx serverCnx = spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); + ChannelHandlerContext channelHandlerContext = mock(ChannelHandlerContext.class); + Channel channel = mock(Channel.class); + ChannelPipeline channelPipeline = mock(ChannelPipeline.class); + doReturn(channelPipeline).when(channel).pipeline(); + doReturn(null).when(channelPipeline).get(PulsarChannelInitializer.TLS_HANDLER); + + SocketAddress socketAddress = new InetSocketAddress(0); + doReturn(socketAddress).when(channel).remoteAddress(); + doReturn(channel).when(channelHandlerContext).channel(); + channelHandlerContext.channel().remoteAddress(); + serverCnx.channelActive(channelHandlerContext); + + // connect + AuthenticationToken proxyAuthenticationToken = new AuthenticationToken(PROXY_TOKEN); + CommandConnect connect = new CommandConnect(); + connect.setAuthMethodName(proxyAuthenticationToken.getAuthMethodName()); + connect.setAuthData(proxyAuthenticationToken.getAuthData().getCommandData().getBytes(StandardCharsets.UTF_8)); + connect.setClientVersion("test"); + connect.setProtocolVersion(1); + connect.setOriginalPrincipal(CLIENT_PRINCIPAL); + 
serverCnx.handleConnect(connect); + assertNull(serverCnx.getOriginalAuthData()); + assertNull(serverCnx.getOriginalAuthState()); + assertEquals(serverCnx.getOriginalPrincipal(), CLIENT_PRINCIPAL); + assertEquals(serverCnx.getAuthData().getCommandData(), + proxyAuthenticationToken.getAuthData().getCommandData()); + assertEquals(serverCnx.getAuthRole(), PROXY_PRINCIPAL); + assertEquals(serverCnx.getAuthState().getAuthRole(), PROXY_PRINCIPAL); + + AuthorizationService authorizationService = + spyWithClassAndConstructorArgs(AuthorizationService.class, svcConfig, pulsarResources); + doReturn(authorizationService).when(brokerService).getAuthorizationService(); + + // lookup + CommandLookupTopic commandLookupTopic = new CommandLookupTopic(); + TopicName topicName = TopicName.get("persistent://public/default/test-topic"); + commandLookupTopic.setTopic(topicName.toString()); + commandLookupTopic.setRequestId(1); + serverCnx.handleLookup(commandLookupTopic); + verify(authorizationService, times(1)).allowTopicOperationAsync(topicName, TopicOperation.LOOKUP, + CLIENT_PRINCIPAL, + serverCnx.getAuthData()); + verify(authorizationService, times(1)).allowTopicOperationAsync(topicName, TopicOperation.LOOKUP, + PROXY_PRINCIPAL, + serverCnx.getAuthData()); + + // producer + CommandProducer commandProducer = new CommandProducer(); + commandProducer.setRequestId(1); + commandProducer.setProducerId(1); + commandProducer.setProducerName("test-producer"); + commandProducer.setTopic(topicName.toString()); + serverCnx.handleProducer(commandProducer); + verify(authorizationService, times(1)).allowTopicOperationAsync(topicName, TopicOperation.PRODUCE, + CLIENT_PRINCIPAL, + serverCnx.getAuthData()); + verify(authorizationService, times(1)).allowTopicOperationAsync(topicName, TopicOperation.LOOKUP, + PROXY_PRINCIPAL, + serverCnx.getAuthData()); + + // consumer + CommandSubscribe commandSubscribe = new CommandSubscribe(); + commandSubscribe.setTopic(topicName.toString()); + 
commandSubscribe.setRequestId(1); + commandSubscribe.setConsumerId(1); + final String subscriptionName = "test-subscribe"; + commandSubscribe.setSubscription("test-subscribe"); + commandSubscribe.setSubType(CommandSubscribe.SubType.Shared); + serverCnx.handleSubscribe(commandSubscribe); + + ArgumentMatcher authenticationDataSourceArgumentMatcher = arg -> { + assertTrue(arg instanceof AuthenticationDataSubscription); + try { + assertEquals(arg.getCommandData(), proxyAuthenticationToken.getAuthData().getCommandData()); + } catch (PulsarClientException e) { + fail(e.getMessage()); + } + assertEquals(arg.getSubscription(), subscriptionName); + return true; + }; + + verify(authorizationService, times(1)).allowTopicOperationAsync( + eq(topicName), eq(TopicOperation.CONSUME), + eq(CLIENT_PRINCIPAL), argThat(authenticationDataSourceArgumentMatcher)); + verify(authorizationService, times(1)).allowTopicOperationAsync( + eq(topicName), eq(TopicOperation.CONSUME), + eq(PROXY_PRINCIPAL), argThat(authenticationDataSourceArgumentMatcher)); + } + + @Test + public void testVerifyAuthRoleAndAuthDataFromDirectConnectionBroker() throws Exception { + ServerCnx serverCnx = spyWithClassAndConstructorArgs(ServerCnx.class, pulsar); + + ChannelHandlerContext channelHandlerContext = mock(ChannelHandlerContext.class); + Channel channel = mock(Channel.class); + ChannelPipeline channelPipeline = mock(ChannelPipeline.class); + doReturn(channelPipeline).when(channel).pipeline(); + doReturn(null).when(channelPipeline).get(PulsarChannelInitializer.TLS_HANDLER); + + SocketAddress socketAddress = new InetSocketAddress(0); + doReturn(socketAddress).when(channel).remoteAddress(); + doReturn(channel).when(channelHandlerContext).channel(); + channelHandlerContext.channel().remoteAddress(); + serverCnx.channelActive(channelHandlerContext); + + // connect + AuthenticationToken clientAuthenticationToken = new AuthenticationToken(CLIENT_TOKEN); + CommandConnect connect = new CommandConnect(); + 
connect.setAuthMethodName(clientAuthenticationToken.getAuthMethodName()); + connect.setAuthData(clientAuthenticationToken.getAuthData().getCommandData().getBytes(StandardCharsets.UTF_8)); + connect.setClientVersion("test"); + connect.setProtocolVersion(1); + serverCnx.handleConnect(connect); + assertNull(serverCnx.getOriginalAuthData()); + assertNull(serverCnx.getOriginalAuthState()); + assertNull(serverCnx.getOriginalPrincipal()); + assertEquals(serverCnx.getAuthData().getCommandData(), + clientAuthenticationToken.getAuthData().getCommandData()); + assertEquals(serverCnx.getAuthRole(), CLIENT_PRINCIPAL); + assertEquals(serverCnx.getAuthState().getAuthRole(), CLIENT_PRINCIPAL); + + AuthorizationService authorizationService = + spyWithClassAndConstructorArgs(AuthorizationService.class, svcConfig, pulsarResources); + doReturn(authorizationService).when(brokerService).getAuthorizationService(); + + // lookup + CommandLookupTopic commandLookupTopic = new CommandLookupTopic(); + TopicName topicName = TopicName.get("persistent://public/default/test-topic"); + commandLookupTopic.setTopic(topicName.toString()); + commandLookupTopic.setRequestId(1); + serverCnx.handleLookup(commandLookupTopic); + verify(authorizationService, times(1)).allowTopicOperationAsync(topicName, TopicOperation.LOOKUP, + CLIENT_PRINCIPAL, + serverCnx.getAuthData()); + + // producer + CommandProducer commandProducer = new CommandProducer(); + commandProducer.setRequestId(1); + commandProducer.setProducerId(1); + commandProducer.setProducerName("test-producer"); + commandProducer.setTopic(topicName.toString()); + serverCnx.handleProducer(commandProducer); + verify(authorizationService, times(1)).allowTopicOperationAsync(topicName, TopicOperation.PRODUCE, + CLIENT_PRINCIPAL, + serverCnx.getAuthData()); + + // consumer + CommandSubscribe commandSubscribe = new CommandSubscribe(); + commandSubscribe.setTopic(topicName.toString()); + commandSubscribe.setRequestId(1); + commandSubscribe.setConsumerId(1); + 
final String subscriptionName = "test-subscribe"; + commandSubscribe.setSubscription("test-subscribe"); + commandSubscribe.setSubType(CommandSubscribe.SubType.Shared); + serverCnx.handleSubscribe(commandSubscribe); + + verify(authorizationService, times(1)).allowTopicOperationAsync( + eq(topicName), eq(TopicOperation.CONSUME), + eq(CLIENT_PRINCIPAL), argThat(arg -> { + assertTrue(arg instanceof AuthenticationDataSubscription); + try { + assertEquals(arg.getCommandData(), clientAuthenticationToken.getAuthData().getCommandData()); + } catch (PulsarClientException e) { + fail(e.getMessage()); + } + assertEquals(arg.getSubscription(), subscriptionName); + return true; + })); + } +} \ No newline at end of file diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxPowerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxPowerTest.java new file mode 100644 index 0000000000000..7042a40cdb6e5 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxPowerTest.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.service; + +import static org.mockito.Mockito.CALLS_REAL_METHODS; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import org.apache.pulsar.common.api.proto.CommandAuthResponse; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.testng.IObjectFactory; +import org.testng.annotations.ObjectFactory; +import org.testng.annotations.Test; + + +@PrepareForTest({CommandAuthResponse.class, org.apache.pulsar.common.api.proto.AuthData.class}) +@PowerMockIgnore({"com.sun.org.apache.xerces.*", "javax.xml.*", "org.xml.*", "javax.management.*", "org.w3c.dom.*"}) +public class ServerCnxPowerTest { + + @ObjectFactory + public IObjectFactory getObjectFactory() { + return new org.powermock.modules.testng.PowerMockObjectFactory(); + } + + @Test + public void testHandleAuthResponseWithoutClientVersion() { + ServerCnx cnx = PowerMockito.mock(ServerCnx.class, CALLS_REAL_METHODS); + CommandAuthResponse authResponse = PowerMockito.mock(CommandAuthResponse.class); + org.apache.pulsar.common.api.proto.AuthData authData = + PowerMockito.mock(org.apache.pulsar.common.api.proto.AuthData.class); + when(authResponse.getResponse()).thenReturn(authData); + when(authResponse.hasResponse()).thenReturn(true); + when(authResponse.getResponse().hasAuthMethodName()).thenReturn(true); + when(authResponse.getResponse().hasAuthData()).thenReturn(true); + when(authResponse.hasClientVersion()).thenReturn(false); + try { + cnx.handleAuthResponse(authResponse); + } catch (Exception ignore) { + } + verify(authResponse, times(1)).hasClientVersion(); + verify(authResponse, times(0)).getClientVersion(); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java index 2dc57b54e2c36..c07ad17e23a9e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/ServerCnxTest.java @@ -18,15 +18,20 @@ */ package org.apache.pulsar.broker.service; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.createMockBookKeeper; import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.createMockZooKeeper; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.matches; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; @@ -50,6 +55,8 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Predicate; import java.util.function.Supplier; import org.apache.bookkeeper.common.util.OrderedExecutor; import org.apache.bookkeeper.mledger.AsyncCallbacks.AddEntryCallback; @@ -86,6 +93,7 @@ import org.apache.pulsar.common.api.proto.BaseCommand; import org.apache.pulsar.common.api.proto.BaseCommand.Type; import org.apache.pulsar.common.api.proto.CommandAck.AckType; +import org.apache.pulsar.common.api.proto.CommandAuthResponse; import org.apache.pulsar.common.api.proto.CommandConnected; import 
org.apache.pulsar.common.api.proto.CommandError; import org.apache.pulsar.common.api.proto.CommandLookupTopicResponse; @@ -106,11 +114,14 @@ import org.apache.pulsar.common.protocol.Commands.ChecksumType; import org.apache.pulsar.common.protocol.PulsarHandler; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.collections.ConcurrentLongHashMap; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; import org.apache.pulsar.metadata.impl.ZKMetadataStore; import org.apache.pulsar.zookeeper.ZooKeeperDataCache; import org.apache.zookeeper.ZooKeeper; import org.awaitility.Awaitility; +import org.mockito.ArgumentCaptor; +import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -152,9 +163,9 @@ public class ServerCnxTest { public void setup() throws Exception { eventLoopGroup = new NioEventLoopGroup(); executor = OrderedExecutor.newBuilder().numThreads(1).build(); - svcConfig = spy(new ServiceConfiguration()); + svcConfig = spy(ServiceConfiguration.class); svcConfig.setBrokerShutdownTimeoutMs(0L); - pulsar = spy(new PulsarService(svcConfig)); + pulsar = spyWithClassAndConstructorArgs(PulsarService.class, svcConfig); doReturn(new DefaultSchemaRegistryService()).when(pulsar).getSchemaRegistryService(); svcConfig.setKeepAliveIntervalSeconds(inSec(1, TimeUnit.SECONDS)); @@ -176,14 +187,14 @@ public void setup() throws Exception { doReturn(store).when(pulsar).getLocalMetadataStore(); doReturn(store).when(pulsar).getConfigurationMetadataStore(); - brokerService = spy(new BrokerService(pulsar, eventLoopGroup)); + brokerService = spyWithClassAndConstructorArgs(BrokerService.class, pulsar, eventLoopGroup); BrokerInterceptor interceptor = mock(BrokerInterceptor.class); doReturn(interceptor).when(brokerService).getInterceptor(); doReturn(brokerService).when(pulsar).getBrokerService(); doReturn(executor).when(pulsar).getOrderedExecutor(); - 
PulsarResources pulsarResources = spy(new PulsarResources(store, store)); - namespaceResources = spy(new NamespaceResources(store, store, 30)); + PulsarResources pulsarResources = spyWithClassAndConstructorArgs(PulsarResources.class, store, store); + namespaceResources = spyWithClassAndConstructorArgs(NamespaceResources.class, store, store, 30); doReturn(namespaceResources).when(pulsarResources).getNamespaceResources(); doReturn(pulsarResources).when(pulsar).getPulsarResources(); @@ -192,6 +203,7 @@ public void setup() throws Exception { doReturn(namespaceService).when(pulsar).getNamespaceService(); doReturn(true).when(namespaceService).isServiceUnitOwned(any()); doReturn(true).when(namespaceService).isServiceUnitActive(any()); + doReturn(CompletableFuture.completedFuture(true)).when(namespaceService).isServiceUnitActiveAsync(any()); doReturn(CompletableFuture.completedFuture(true)).when(namespaceService).checkTopicOwnership(any()); setupMLAsyncCallbackMocks(); @@ -457,7 +469,8 @@ public void testProducerOnNotOwnedTopic() throws Exception { setChannelConnected(); // Force the case where the broker doesn't own any topic - doReturn(false).when(namespaceService).isServiceUnitActive(any(TopicName.class)); + doReturn(CompletableFuture.completedFuture(false)).when(namespaceService) + .isServiceUnitActiveAsync(any(TopicName.class)); // test PRODUCER failure case ByteBuf clientCommand = Commands.newProducer(nonOwnedTopicName, 1 /* producer id */, 1 /* request id */, @@ -502,7 +515,8 @@ public void testProducerCommandWithAuthorizationPositive() throws Exception { @Test(timeOut = 30000) public void testNonExistentTopic() throws Exception { - AuthorizationService authorizationService = spy(new AuthorizationService(svcConfig, pulsar.getPulsarResources())); + AuthorizationService authorizationService = + spyWithClassAndConstructorArgs(AuthorizationService.class, svcConfig, pulsar.getPulsarResources()); 
doReturn(authorizationService).when(brokerService).getAuthorizationService(); doReturn(true).when(brokerService).isAuthorizationEnabled(); svcConfig.setAuthorizationEnabled(true); @@ -535,7 +549,8 @@ public void testNonExistentTopic() throws Exception { @Test(timeOut = 30000) public void testClusterAccess() throws Exception { svcConfig.setAuthorizationEnabled(true); - AuthorizationService authorizationService = spy(new AuthorizationService(svcConfig, pulsar.getPulsarResources())); + AuthorizationService authorizationService = + spyWithClassAndConstructorArgs(AuthorizationService.class, svcConfig, pulsar.getPulsarResources()); Field providerField = AuthorizationService.class.getDeclaredField("provider"); providerField.setAccessible(true); PulsarAuthorizationProvider authorizationProvider = spy(new PulsarAuthorizationProvider(svcConfig, @@ -566,7 +581,8 @@ public void testClusterAccess() throws Exception { @Test(timeOut = 30000) public void testNonExistentTopicSuperUserAccess() throws Exception { - AuthorizationService authorizationService = spy(new AuthorizationService(svcConfig, pulsar.getPulsarResources())); + AuthorizationService authorizationService = + spyWithClassAndConstructorArgs(AuthorizationService.class, svcConfig, pulsar.getPulsarResources()); doReturn(authorizationService).when(brokerService).getAuthorizationService(); doReturn(true).when(brokerService).isAuthorizationEnabled(); Field providerField = AuthorizationService.class.getDeclaredField("provider"); @@ -648,6 +664,28 @@ public void testSendCommand() throws Exception { channel.finish(); } + @Test(timeOut = 30000) + public void testSendCommandBeforeCreatingProducer() throws Exception { + resetChannel(); + setChannelConnected(); + + // test SEND before producer is created + MessageMetadata messageMetadata = new MessageMetadata() + .setPublishTime(System.currentTimeMillis()) + .setProducerName("prod-name") + .setSequenceId(0); + ByteBuf data = Unpooled.buffer(1024); + + ByteBuf clientCommand = 
ByteBufPair.coalesce(Commands.newSend(1, 0, 1, + ChecksumType.None, messageMetadata, data)); + channel.writeInbound(Unpooled.copiedBuffer(clientCommand)); + clientCommand.release(); + + // Then expect channel to close + Awaitility.await().atMost(10, TimeUnit.SECONDS).until(() -> !channel.isActive()); + channel.finish(); + } + @Test(timeOut = 30000) public void testUseSameProducerName() throws Exception { resetChannel(); @@ -816,6 +854,72 @@ public void testCreateProducerTimeout() throws Exception { channel.finish(); } + @Test(timeOut = 30000) + public void testCreateProducerTimeoutThenCreateSameNamedProducerShouldFail() throws Exception { + resetChannel(); + setChannelConnected(); + + // Delay the topic creation in a deterministic way + CompletableFuture openTopicFuture = new CompletableFuture<>(); + doAnswer(invocationOnMock -> { + openTopicFuture.complete(() -> { + ((OpenLedgerCallback) invocationOnMock.getArguments()[2]).openLedgerComplete(ledgerMock, null); + }); + return null; + }).when(mlFactoryMock).asyncOpen(matches(".*success.*"), any(ManagedLedgerConfig.class), + any(OpenLedgerCallback.class), any(Supplier.class), any()); + + // In a create producer timeout from client side we expect to see this sequence of commands : + // 1. create producer + // 2. close producer (when the timeout is triggered, which may be before the producer was created on the broker + // 3. create producer (triggered by reconnection logic) + // Then, when another producer is created with the same name, it should fail. 
Because we only have one + // channel here, we just use a different producer id + + // These operations need to be serialized, to allow the last create producer to finally succeed + // (There can be more create/close pairs in the sequence, depending on the client timeout + + String producerName = "my-producer"; + + ByteBuf createProducer1 = Commands.newProducer(successTopicName, 1 /* producer id */, 1 /* request id */, + producerName, Collections.emptyMap(), false); + channel.writeInbound(createProducer1); + + ByteBuf closeProducer = Commands.newCloseProducer(1 /* producer id */, 2 /* request id */ ); + channel.writeInbound(closeProducer); + + ByteBuf createProducer2 = Commands.newProducer(successTopicName, 1 /* producer id */, 3 /* request id */, + producerName, Collections.emptyMap(), false); + channel.writeInbound(createProducer2); + + // Complete the topic opening: It will make 2nd producer creation successful + openTopicFuture.get().run(); + + // Close succeeds + Object response = getResponse(); + assertEquals(response.getClass(), CommandSuccess.class); + assertEquals(((CommandSuccess) response).getRequestId(), 2); + + // 2nd producer will be successfully created as topic is open by then + response = getResponse(); + assertEquals(response.getClass(), CommandProducerSuccess.class); + assertEquals(((CommandProducerSuccess) response).getRequestId(), 3); + + // Send create command after getting the CommandProducerSuccess to ensure correct ordering + ByteBuf createProducer3 = Commands.newProducer(successTopicName, 2 /* producer id */, 4 /* request id */, + producerName, Collections.emptyMap(), false); + channel.writeInbound(createProducer3); + + // 3nd producer will fail + response = getResponse(); + assertEquals(response.getClass(), CommandError.class); + assertEquals(((CommandError) response).getRequestId(), 4); + + assertTrue(channel.isActive()); + + channel.finish(); + } + @Test(timeOut = 30000, enabled = false) public void testCreateProducerMultipleTimeouts() 
throws Exception { resetChannel(); @@ -1453,7 +1557,7 @@ protected void resetChannel() throws Exception { channel.close().get(); } serverCnx = new ServerCnx(pulsar); - serverCnx.authRole = ""; + serverCnx.setAuthRole(""); channel = new EmbeddedChannel(new LengthFieldBasedFrameDecoder( MaxMessageSize, 0, @@ -1715,4 +1819,146 @@ public void testTopicIsNotReady() throws Exception { channel.finish(); } + + @Test(enabled = false) + public void testNeverDelayConsumerFutureWhenNotFail() throws Exception{ + // Mock ServerCnx.field: consumers + ConcurrentLongHashMap.Builder mapBuilder = Mockito.mock(ConcurrentLongHashMap.Builder.class); + Mockito.when(mapBuilder.expectedItems(Mockito.anyInt())).thenReturn(mapBuilder); + Mockito.when(mapBuilder.concurrencyLevel(Mockito.anyInt())).thenReturn(mapBuilder); + ConcurrentLongHashMap consumers = Mockito.mock(ConcurrentLongHashMap.class); + Mockito.when(mapBuilder.build()).thenReturn(consumers); + ArgumentCaptor ignoreArgumentCaptor = ArgumentCaptor.forClass(Long.class); + final ArgumentCaptor deleteTimesMark = ArgumentCaptor.forClass(CompletableFuture.class); + Mockito.when(consumers.remove(ignoreArgumentCaptor.capture())).thenReturn(true); + Mockito.when(consumers.remove(ignoreArgumentCaptor.capture(), deleteTimesMark.capture())).thenReturn(true); + // case1: exists existingConsumerFuture, already complete or delay done after execute 'isDone()' many times + // case2: exists existingConsumerFuture, delay complete after execute 'isDone()' many times + // Why is the design so complicated, see: https://github.com/apache/pulsar/pull/15051 + // Try a delay of 3 stages. The simulation is successful after repeated judgments. 
+ for(AtomicInteger futureWillDoneAfterDelayTimes = new AtomicInteger(1); + futureWillDoneAfterDelayTimes.intValue() <= 3; + futureWillDoneAfterDelayTimes.incrementAndGet()){ + final AtomicInteger futureCallTimes = new AtomicInteger(); + final Consumer mockConsumer = Mockito.mock(Consumer.class); + CompletableFuture existingConsumerFuture = new CompletableFuture(){ + + private boolean complete; + + // delay complete after execute 'isDone()' many times + @Override + public boolean isDone() { + if (complete) { + return true; + } + int executeIsDoneCommandTimes = futureCallTimes.incrementAndGet(); + return executeIsDoneCommandTimes >= futureWillDoneAfterDelayTimes.intValue(); + } + + // if trig "getNow()", then complete + @Override + public Consumer get(){ + complete = true; + return mockConsumer; + } + + // if trig "get()", then complete + @Override + public Consumer get(long timeout, TimeUnit unit){ + complete = true; + return mockConsumer; + } + + // if trig "get()", then complete + @Override + public Consumer getNow(Consumer ifAbsent){ + complete = true; + return mockConsumer; + } + + // never fail + public boolean isCompletedExceptionally(){ + return false; + } + }; + Mockito.when(consumers.putIfAbsent(Mockito.anyLong(), Mockito.any())).thenReturn(existingConsumerFuture); + // do test: delay complete after execute 'isDone()' many times + // Why is the design so complicated, see: https://github.com/apache/pulsar/pull/15051 + try (MockedStatic theMock = Mockito.mockStatic(ConcurrentLongHashMap.class)) { + // Inject consumers to ServerCnx + theMock.when(ConcurrentLongHashMap::newBuilder).thenReturn(mapBuilder); + // reset channels( serverChannel, clientChannel ) + resetChannel(); + setChannelConnected(); + // auth check disable + doReturn(false).when(brokerService).isAuthenticationEnabled(); + doReturn(false).when(brokerService).isAuthorizationEnabled(); + // do subscribe + ByteBuf clientCommand = Commands.newSubscribe(successTopicName, // + successSubName, 1 /* 
consumer id */, 1 /* request id */, SubType.Exclusive, 0, + "test" /* consumer name */, 0 /* avoid reseting cursor */); + channel.writeInbound(clientCommand); + Object responseObj = getResponse(); + Predicate responseAssert = obj -> { + if (responseObj instanceof CommandSuccess) { + return true; + } + if (responseObj instanceof CommandError) { + CommandError commandError = (CommandError) responseObj; + return ServerError.ServiceNotReady == commandError.getError(); + } + return false; + }; + // assert no consumer-delete event occur + assertFalse(deleteTimesMark.getAllValues().contains(existingConsumerFuture)); + // assert without another error occur + assertTrue(responseAssert.test(responseAssert)); + // Server will not close the connection + assertTrue(channel.isOpen()); + channel.finish(); + } + } + // case3: exists existingConsumerFuture, already complete and exception + CompletableFuture existingConsumerFuture = Mockito.mock(CompletableFuture.class); + Mockito.when(consumers.putIfAbsent(Mockito.anyLong(), Mockito.any())).thenReturn(existingConsumerFuture); + // make consumerFuture delay finish + Mockito.when(existingConsumerFuture.isDone()).thenReturn(true); + // when sync get return, future will return success value. + Mockito.when(existingConsumerFuture.get()).thenThrow(new NullPointerException()); + Mockito.when(existingConsumerFuture.get(Mockito.anyLong(), Mockito.any())). 
+ thenThrow(new NullPointerException()); + Mockito.when(existingConsumerFuture.isCompletedExceptionally()).thenReturn(true); + Mockito.when(existingConsumerFuture.getNow(Mockito.any())).thenThrow(new NullPointerException()); + try (MockedStatic theMock = Mockito.mockStatic(ConcurrentLongHashMap.class)) { + // Inject consumers to ServerCnx + theMock.when(ConcurrentLongHashMap::newBuilder).thenReturn(mapBuilder); + // reset channels( serverChannel, clientChannel ) + resetChannel(); + setChannelConnected(); + // auth check disable + doReturn(false).when(brokerService).isAuthenticationEnabled(); + doReturn(false).when(brokerService).isAuthorizationEnabled(); + // do subscribe + ByteBuf clientCommand = Commands.newSubscribe(successTopicName, // + successSubName, 1 /* consumer id */, 1 /* request id */, SubType.Exclusive, 0, + "test" /* consumer name */, 0 /* avoid reseting cursor */); + channel.writeInbound(clientCommand); + Object responseObj = getResponse(); + Predicate responseAssert = obj -> { + if (responseObj instanceof CommandError) { + CommandError commandError = (CommandError) responseObj; + return ServerError.ServiceNotReady != commandError.getError(); + } + return false; + }; + // assert error response + assertTrue(responseAssert.test(responseAssert)); + // assert consumer-delete event occur + assertEquals(1L, + deleteTimesMark.getAllValues().stream().filter(f -> f == existingConsumerFuture).count()); + // Server will not close the connection + assertTrue(channel.isOpen()); + channel.finish(); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SubscriptionSeekTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SubscriptionSeekTest.java index 8c5e96972354d..b59eb95512d91 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SubscriptionSeekTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SubscriptionSeekTest.java @@ -187,7 +187,7 @@ public void 
testSeekForBatch() throws Exception { @Test public void testSeekForBatchMessageAndSpecifiedBatchIndex() throws Exception { - final String topicName = "persistent://prop/use/ns-abcd/testSeekForBatch"; + final String topicName = "persistent://prop/use/ns-abcd/testSeekForBatchMessageAndSpecifiedBatchIndex"; String subscriptionName = "my-subscription-batch"; Producer producer = pulsarClient.newProducer(Schema.STRING) @@ -560,7 +560,7 @@ public void testShouldCloseAllConsumersForMultipleConsumerDispatcherWhenSeek() t .subscriptionName("my-subscription") .subscribe(); - pulsarClient.newConsumer() + org.apache.pulsar.client.api.Consumer consumer2 = pulsarClient.newConsumer() .topic(topicName) .subscriptionType(SubscriptionType.Shared) .subscriptionName("my-subscription") @@ -579,12 +579,15 @@ public void testShouldCloseAllConsumersForMultipleConsumerDispatcherWhenSeek() t consumer1.seek(MessageId.earliest); // Wait for consumer to reconnect Awaitility.await().until(consumer1::isConnected); + Awaitility.await().until(consumer2::isConnected); consumers = topicRef.getSubscriptions().get("my-subscription").getConsumers(); assertEquals(consumers.size(), 2); for (Consumer consumer : consumers) { assertFalse(connectedSinceSet.contains(consumer.getStats().getConnectedSince())); } + consumer1.close(); + consumer2.close(); } @Test diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java index 61365fe4d3e6b..8b5f4203c8447 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/SystemTopicBasedTopicPoliciesServiceTest.java @@ -18,27 +18,44 @@ */ package org.apache.pulsar.broker.service; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import 
static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; import static org.testng.AssertJUnit.assertEquals; import static org.testng.AssertJUnit.assertNotNull; import static org.testng.AssertJUnit.assertNull; import static org.testng.AssertJUnit.assertTrue; import com.google.common.collect.Sets; +import java.lang.reflect.Field; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.UUID; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.service.BrokerServiceException.TopicPoliciesCacheNotInitException; import org.apache.pulsar.broker.systopic.NamespaceEventsSystemTopicFactory; +import org.apache.pulsar.broker.systopic.SystemTopicClient; +import org.apache.pulsar.broker.systopic.TopicPoliciesSystemTopicClient; import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Reader; import org.apache.pulsar.client.impl.Backoff; import org.apache.pulsar.client.impl.BackoffBuilder; +import org.apache.pulsar.client.impl.ReaderImpl; +import org.apache.pulsar.common.events.PulsarEvent; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicPolicies; +import org.apache.pulsar.common.util.FutureUtil; import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -59,7 +76,6 @@ public class SystemTopicBasedTopicPoliciesServiceTest extends MockedPulsarServic 
private static final TopicName TOPIC5 = TopicName.get("persistent", NamespaceName.get(NAMESPACE3), "topic-1"); private static final TopicName TOPIC6 = TopicName.get("persistent", NamespaceName.get(NAMESPACE3), "topic-2"); - private NamespaceEventsSystemTopicFactory systemTopicFactory; private SystemTopicBasedTopicPoliciesService systemTopicBasedTopicPoliciesService; @BeforeMethod(alwaysRun = true) @@ -77,6 +93,40 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + @Test + public void testConcurrentlyRegisterUnregisterListeners() throws ExecutionException, InterruptedException { + TopicName topicName = TopicName.get("test"); + class TopicPolicyListenerImpl implements TopicPolicyListener { + + @Override + public void onUpdate(TopicPolicies data) { + //no op. + } + } + + CompletableFuture f = CompletableFuture.completedFuture(null).thenRunAsync(() -> { + for (int i = 0; i < 100; i++) { + TopicPolicyListener listener = new TopicPolicyListenerImpl(); + systemTopicBasedTopicPoliciesService.registerListener(topicName, listener); + Assert.assertNotNull(systemTopicBasedTopicPoliciesService.listeners.get(topicName)); + Assert.assertTrue(systemTopicBasedTopicPoliciesService.listeners.get(topicName).size() >= 1); + systemTopicBasedTopicPoliciesService.unregisterListener(topicName, listener); + } + }); + + for (int i = 0; i < 100; i++) { + TopicPolicyListener listener = new TopicPolicyListenerImpl(); + systemTopicBasedTopicPoliciesService.registerListener(topicName, listener); + Assert.assertNotNull(systemTopicBasedTopicPoliciesService.listeners.get(topicName)); + Assert.assertTrue(systemTopicBasedTopicPoliciesService.listeners.get(topicName).size() >= 1); + systemTopicBasedTopicPoliciesService.unregisterListener(topicName, listener); + } + + f.get(); + //Some system topics will be added to the listeners. Just check if it contains topicName. 
+ Assert.assertFalse(systemTopicBasedTopicPoliciesService.listeners.containsKey(topicName)); + } + @Test public void testGetPolicy() throws ExecutionException, InterruptedException, TopicPoliciesCacheNotInitException { @@ -209,6 +259,30 @@ public void testCacheCleanup() throws Exception { assertNull(listMap.get(topicName)); } + @Test + public void testListenerCleanupByPartition() throws Exception { + final String topic = "persistent://" + NAMESPACE1 + "/test" + UUID.randomUUID(); + TopicName topicName = TopicName.get(topic); + admin.topics().createPartitionedTopic(topic, 3); + pulsarClient.newProducer().topic(topic).create().close(); + + Map>> listMap = + systemTopicBasedTopicPoliciesService.getListeners(); + Awaitility.await().untilAsserted(() -> { + // all 3 topic partition have registered the topic policy listeners. + assertEquals(listMap.get(topicName).size(), 3); + }); + + admin.topics().unload(topicName.getPartition(0).toString()); + assertEquals(listMap.get(topicName).size(), 2); + admin.topics().unload(topicName.getPartition(1).toString()); + assertEquals(listMap.get(topicName).size(), 1); + admin.topics().unload(topicName.getPartition(2).toString()); + assertNull(listMap.get(topicName)); + } + + + private void prepareData() throws PulsarAdminException { admin.clusters().createCluster("test", ClusterData.builder().serviceUrl(brokerUrl.toString()).build()); admin.tenants().createTenant("system-topic", @@ -222,7 +296,6 @@ private void prepareData() throws PulsarAdminException { admin.lookups().lookupTopic(TOPIC4.toString()); admin.lookups().lookupTopic(TOPIC5.toString()); admin.lookups().lookupTopic(TOPIC6.toString()); - systemTopicFactory = new NamespaceEventsSystemTopicFactory(pulsarClient); systemTopicBasedTopicPoliciesService = (SystemTopicBasedTopicPoliciesService) pulsar.getTopicPoliciesService(); } @@ -240,9 +313,67 @@ public void testGetPolicyTimeout() throws Exception { try { service.getTopicPoliciesAsyncWithRetry(TOPIC1, backoff, 
pulsar.getExecutor()).get(); } catch (Exception e) { - assertTrue(e.getCause().getCause() instanceof TopicPoliciesCacheNotInitException); + assertTrue(e.getCause() instanceof TopicPoliciesCacheNotInitException); } long cost = System.currentTimeMillis() - start; assertTrue("actual:" + cost, cost >= 5000 - 1000); } + + @Test + public void testCreatSystemTopicClientWithRetry() throws Exception { + SystemTopicBasedTopicPoliciesService service = + spy((SystemTopicBasedTopicPoliciesService) pulsar.getTopicPoliciesService()); + Field field = SystemTopicBasedTopicPoliciesService.class + .getDeclaredField("namespaceEventsSystemTopicFactory"); + field.setAccessible(true); + NamespaceEventsSystemTopicFactory factory = spy((NamespaceEventsSystemTopicFactory) field.get(service)); + SystemTopicClient client = mock(TopicPoliciesSystemTopicClient.class); + doReturn(client).when(factory).createTopicPoliciesSystemTopicClient(any()); + field.set(service, factory); + + SystemTopicClient.Reader reader = mock(SystemTopicClient.Reader.class); + // Throw an exception first, create successfully after retrying + doReturn(FutureUtil.failedFuture(new PulsarClientException("test"))) + .doReturn(CompletableFuture.completedFuture(reader)).when(client).newReaderAsync(); + + SystemTopicClient.Reader reader1 = service.createSystemTopicClientWithRetry(null).get(); + + assertEquals(reader1, reader); + } + + @Test + public void testGetTopicPoliciesWithRetry() throws Exception { + Field initMapField = SystemTopicBasedTopicPoliciesService.class.getDeclaredField("policyCacheInitMap"); + initMapField.setAccessible(true); + Map initMap = (Map)initMapField.get(systemTopicBasedTopicPoliciesService); + initMap.remove(NamespaceName.get(NAMESPACE1)); + Field readerCaches = SystemTopicBasedTopicPoliciesService.class.getDeclaredField("readerCaches"); + readerCaches.setAccessible(true); + Map>> readers = (Map)readerCaches.get(systemTopicBasedTopicPoliciesService); + readers.remove(NamespaceName.get(NAMESPACE1)); + 
Backoff backoff = new BackoffBuilder() + .setInitialTime(500, TimeUnit.MILLISECONDS) + .setMandatoryStop(5000, TimeUnit.MILLISECONDS) + .setMax(1000, TimeUnit.MILLISECONDS) + .create(); + TopicPolicies initPolicy = TopicPolicies.builder() + .maxConsumerPerTopic(10) + .build(); + ScheduledExecutorService executors = Executors.newScheduledThreadPool(1); + executors.schedule(new Runnable() { + @Override + public void run() { + try { + systemTopicBasedTopicPoliciesService.updateTopicPoliciesAsync(TOPIC1, initPolicy).get(); + } catch (Exception ignore) {} + } + }, 2000, TimeUnit.MILLISECONDS); + Awaitility.await().untilAsserted(() -> { + Optional topicPolicies = systemTopicBasedTopicPoliciesService.getTopicPoliciesAsyncWithRetry(TOPIC1, backoff, pulsar.getExecutor()).get(); + Assert.assertTrue(topicPolicies.isPresent()); + if (topicPolicies.isPresent()) { + Assert.assertEquals(topicPolicies.get(), initPolicy); + } + }); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TopicTerminationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TopicTerminationTest.java index 0cd84f3a84602..dedc9904ce846 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TopicTerminationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TopicTerminationTest.java @@ -31,6 +31,8 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; + +import io.netty.util.HashedWheelTimer; import org.apache.pulsar.client.admin.PulsarAdminException.NotAllowedException; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; @@ -41,8 +43,10 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Reader; import org.apache.pulsar.client.api.ReaderListener; +import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.common.util.FutureUtil; import 
org.awaitility.Awaitility; +import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -108,6 +112,30 @@ public void testCreateProducerOnTerminatedTopic() throws Exception { } } + public void testCreatingProducerTasksCleanupWhenOnTerminatedTopic() throws Exception { + Producer producer = pulsarClient.newProducer().topic(topicName) + .enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); + + producer.send("msg-1".getBytes()); + producer.send("msg-2".getBytes()); + MessageId msgId3 = producer.send("msg-3".getBytes()); + + MessageId lastMessageId = admin.topics().terminateTopicAsync(topicName).get(); + assertEquals(lastMessageId, msgId3); + producer.close(); + + try { + pulsarClient.newProducer().topic(topicName).create(); + fail("Should have thrown exception"); + } catch (PulsarClientException.TopicTerminatedException e) { + // Expected + } + HashedWheelTimer timer = (HashedWheelTimer) ((PulsarClientImpl) pulsarClient).timer(); + Awaitility.await().untilAsserted(() -> Assert.assertEquals(timer.pendingTimeouts(), 0)); + } + @Test(timeOut = 20000) public void testTerminateWhilePublishing() throws Exception { Producer producer = pulsarClient.newProducer().topic(topicName) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TransactionMarkerDeleteTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TransactionMarkerDeleteTest.java index f25b346a3360a..aa2a8d49e9c95 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TransactionMarkerDeleteTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TransactionMarkerDeleteTest.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.broker.service; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.ArgumentMatchers.any; import static 
org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -25,45 +26,39 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertTrue; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.PooledByteBufAllocator; +import static org.testng.Assert.assertNull; import java.util.Collections; import java.util.concurrent.TimeUnit; +import lombok.Cleanup; import org.apache.bookkeeper.mledger.ManagedCursor; -import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; -import org.apache.commons.lang3.tuple.MutablePair; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; -import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.broker.transaction.TransactionTestBase; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.api.transaction.Transaction; +import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.common.api.proto.CommandAck.AckType; -import org.apache.pulsar.common.api.proto.MessageMetadata; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; -import org.apache.pulsar.common.protocol.Commands; -import org.apache.pulsar.common.protocol.Markers; import org.awaitility.Awaitility; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import org.testng.collections.Sets; @Test(groups = "broker") -public class TransactionMarkerDeleteTest extends 
BrokerTestBase { +public class TransactionMarkerDeleteTest extends TransactionTestBase { + private static final int TOPIC_PARTITION = 3; + private static final String TOPIC_OUTPUT = NAMESPACE1 + "/output"; + private static final int NUM_PARTITIONS = 16; @BeforeMethod - @Override protected void setup() throws Exception { - conf.setTransactionCoordinatorEnabled(true); - super.baseSetup(); - admin.tenants().createTenant("public", - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet("test"))); - - admin.namespaces().createNamespace("public/default"); + setUpBase(1, NUM_PARTITIONS, TOPIC_OUTPUT, TOPIC_PARTITION); } @AfterMethod(alwaysRun = true) @@ -74,7 +69,8 @@ protected void cleanup() throws Exception { @Test public void testMarkerDeleteTimes() throws Exception { - ManagedLedgerImpl managedLedger = spy((ManagedLedgerImpl) pulsar.getManagedLedgerFactory().open("test")); + ManagedLedgerImpl managedLedger = + spy((ManagedLedgerImpl) getPulsarServiceList().get(0).getManagedLedgerFactory().open("test")); PersistentTopic topic = mock(PersistentTopic.class); BrokerService brokerService = mock(BrokerService.class); PulsarService pulsarService = mock(PulsarService.class); @@ -85,8 +81,8 @@ public void testMarkerDeleteTimes() throws Exception { doReturn(false).when(configuration).isTransactionCoordinatorEnabled(); doReturn(managedLedger).when(topic).getManagedLedger(); ManagedCursor cursor = managedLedger.openCursor("test"); - PersistentSubscription persistentSubscription = spy(new PersistentSubscription(topic, "test", - cursor, false)); + PersistentSubscription persistentSubscription = + spyWithClassAndConstructorArgs(PersistentSubscription.class, topic, "test", cursor, false); Position position = managedLedger.addEntry("test".getBytes()); persistentSubscription.acknowledgeMessage(Collections.singletonList(position), AckType.Individual, Collections.emptyMap()); @@ -96,84 +92,74 @@ public void testMarkerDeleteTimes() throws Exception { @Test public void 
testMarkerDelete() throws Exception { - - MessageMetadata msgMetadata = new MessageMetadata().clear() - .setPublishTime(1) - .setProducerName("test") - .setSequenceId(0); - - ByteBuf payload = PooledByteBufAllocator.DEFAULT.buffer(0); - - payload = Commands.serializeMetadataAndPayload(Commands.ChecksumType.Crc32c, - msgMetadata, payload); - - ManagedLedger managedLedger = pulsar.getManagedLedgerFactory().open("test"); - PersistentTopic topic = mock(PersistentTopic.class); - doReturn(pulsar.getBrokerService()).when(topic).getBrokerService(); - doReturn(managedLedger).when(topic).getManagedLedger(); - doReturn("test").when(topic).getName(); - ManagedCursor cursor = managedLedger.openCursor("test"); - PersistentSubscription persistentSubscription = new PersistentSubscription(topic, "test", - managedLedger.openCursor("test"), false); - - byte[] payloadBytes = toBytes(payload); - Position position1 = managedLedger.addEntry(payloadBytes); - Position markerPosition1 = managedLedger.addEntry(toBytes(Markers - .newTxnCommitMarker(1, 1, 1))); - - Position position2 = managedLedger.addEntry(payloadBytes); - Position markerPosition2 = managedLedger.addEntry(toBytes(Markers - .newTxnAbortMarker(1, 1, 1))); - - Position position3 = managedLedger.addEntry(payloadBytes); - - assertEquals(cursor.getNumberOfEntriesInBacklog(true), 5); - assertTrue(((PositionImpl) cursor.getMarkDeletedPosition()).compareTo((PositionImpl) position1) < 0); - - // ack position1, markerDeletePosition to markerPosition1 - persistentSubscription.acknowledgeMessage(Collections.singletonList(position1), - AckType.Individual, Collections.emptyMap()); - - // ack position1, markerDeletePosition to markerPosition1 - Awaitility.await().during(1, TimeUnit.SECONDS).until(() -> - ((PositionImpl) persistentSubscription.getCursor().getMarkDeletedPosition()) - .compareTo((PositionImpl) markerPosition1) == 0); - - // ack position2, markerDeletePosition to markerPosition2 - 
persistentSubscription.acknowledgeMessage(Collections.singletonList(position2), - AckType.Individual, Collections.emptyMap()); - - Awaitility.await().until(() -> - ((PositionImpl) persistentSubscription.getCursor().getMarkDeletedPosition()) - .compareTo((PositionImpl) markerPosition2) == 0); - - // add consequent marker - managedLedger.addEntry(toBytes(Markers - .newTxnCommitMarker(1, 1, 1))); - - managedLedger.addEntry(toBytes(Markers - .newTxnAbortMarker(1, 1, 1))); - - Position markerPosition3 = managedLedger.addEntry(toBytes(Markers - .newTxnAbortMarker(1, 1, 1))); - - // ack with transaction, then commit this transaction - persistentSubscription.transactionIndividualAcknowledge(new TxnID(0, 0), - Collections.singletonList(MutablePair.of((PositionImpl) position3, 0))).get(); - - persistentSubscription.endTxn(0, 0, 0, 0).get(); - - // ack with transaction, then commit this transaction - Awaitility.await().until(() -> - ((PositionImpl) persistentSubscription.getCursor().getMarkDeletedPosition()) - .compareTo((PositionImpl) markerPosition3) == 0); - + final String subName = "testMarkerDelete"; + final String topicName = NAMESPACE1 + "/testMarkerDelete"; + @Cleanup + Consumer consumer = pulsarClient + .newConsumer() + .topic(topicName) + .subscriptionName(subName) + .isAckReceiptEnabled(true) + .subscriptionType(SubscriptionType.Shared) + .subscribe(); + + Producer producer = pulsarClient + .newProducer() + .sendTimeout(0, TimeUnit.SECONDS) + .topic(topicName) + .create(); + + Transaction txn1 = getTxn(); + Transaction txn2 = getTxn(); + Transaction txn3 = getTxn(); + Transaction txn4 = getTxn(); + + MessageIdImpl msgId1 = (MessageIdImpl) producer.newMessage(txn1).send(); + MessageIdImpl msgId2 = (MessageIdImpl) producer.newMessage(txn2).send(); + assertNull(consumer.receive(1, TimeUnit.SECONDS)); + txn1.commit().get(); + + consumer.acknowledgeAsync(consumer.receive()).get(); + assertNull(consumer.receive(1, TimeUnit.SECONDS)); + + // maxReadPosition move to 
msgId1, msgId2 have not be committed + assertEquals(admin.topics().getInternalStats(topicName).cursors.get(subName).markDeletePosition, + PositionImpl.get(msgId1.getLedgerId(), msgId1.getEntryId()).toString()); + + MessageIdImpl msgId3 = (MessageIdImpl) producer.newMessage(txn3).send(); + txn2.commit().get(); + + consumer.acknowledgeAsync(consumer.receive()).get(); + assertNull(consumer.receive(1, TimeUnit.SECONDS)); + + // maxReadPosition move to txn1 marker, so entryId is msgId2.getEntryId() + 1, + // because send msgId2 before commit txn1 + assertEquals(admin.topics().getInternalStats(topicName).cursors.get(subName).markDeletePosition, + PositionImpl.get(msgId2.getLedgerId(), msgId2.getEntryId() + 1).toString()); + + MessageIdImpl msgId4 = (MessageIdImpl) producer.newMessage(txn4).send(); + txn3.commit().get(); + + consumer.acknowledgeAsync(consumer.receive()).get(); + assertNull(consumer.receive(1, TimeUnit.SECONDS)); + + // maxReadPosition move to txn2 marker, because msgId4 have not be committed + assertEquals(admin.topics().getInternalStats(topicName).cursors.get(subName).markDeletePosition, + PositionImpl.get(msgId3.getLedgerId(), msgId3.getEntryId() + 1).toString()); + + txn4.abort().get(); + + // maxReadPosition move to txn4 abort marker, so entryId is msgId4.getEntryId() + 2 + Awaitility.await().untilAsserted(() -> assertEquals(admin.topics().getInternalStats(topicName) + .cursors.get(subName).markDeletePosition, PositionImpl.get(msgId4.getLedgerId(), + msgId4.getEntryId() + 2).toString())); } - static byte[] toBytes(ByteBuf byteBuf) { - byte[] buf = new byte[byteBuf.readableBytes()]; - byteBuf.readBytes(buf); - byteBuf.release(); - return buf; + private Transaction getTxn() throws Exception { + return pulsarClient + .newTransaction() + .withTransactionTimeout(10, TimeUnit.SECONDS) + .build() + .get(); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TransactionMetadataStoreServiceTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TransactionMetadataStoreServiceTest.java index 983f0d1dacf57..e50a39c5dfd91 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TransactionMetadataStoreServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/TransactionMetadataStoreServiceTest.java @@ -25,11 +25,13 @@ import java.lang.reflect.Method; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import com.google.common.collect.Sets; +import java.util.concurrent.TimeoutException; import org.apache.bookkeeper.mledger.Position; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.ServiceConfiguration; @@ -351,13 +353,15 @@ public void testEndTransactionOpRetry(TxnStatus txnStatus) throws Exception { Field field = TransactionMetadataStoreState.class.getDeclaredField("state"); field.setAccessible(true); field.set(transactionMetadataStore, TransactionMetadataStoreState.State.None); - + CompletableFuture completableFuture = null; try { - pulsar.getTransactionMetadataStoreService().endTransaction(txnID, TxnAction.COMMIT.getValue(), false).get(); + completableFuture = pulsar.getTransactionMetadataStoreService().endTransaction(txnID, TxnAction.COMMIT.getValue(), + false); + completableFuture.get(5, TimeUnit.SECONDS); fail(); } catch (Exception e) { if (txnStatus == TxnStatus.OPEN || txnStatus == TxnStatus.COMMITTING) { - assertTrue(e.getCause() instanceof CoordinatorException.TransactionMetadataStoreStateException); + assertTrue(e instanceof TimeoutException); } else if (txnStatus == TxnStatus.ABORTING) { assertTrue(e.getCause() instanceof CoordinatorException.InvalidTxnStatusException); } else { @@ -370,9 +374,9 @@ public void testEndTransactionOpRetry(TxnStatus txnStatus) throws Exception { field = 
TransactionMetadataStoreState.class.getDeclaredField("state"); field.setAccessible(true); field.set(transactionMetadataStore, TransactionMetadataStoreState.State.Ready); - if (txnStatus == TxnStatus.ABORTING) { - pulsar.getTransactionMetadataStoreService().endTransaction(txnID, TxnAction.ABORT.getValue(), false).get(); + pulsar.getTransactionMetadataStoreService() + .endTransaction(txnID, TxnAction.ABORT.getValue(), false).get(); } Awaitility.await().atMost(timeOut, TimeUnit.MILLISECONDS).until(() -> { try { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentStickyKeyDispatcherMultipleConsumersTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentStickyKeyDispatcherMultipleConsumersTest.java index 990bd8f4b5acf..4a2b0f48e7c50 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentStickyKeyDispatcherMultipleConsumersTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentStickyKeyDispatcherMultipleConsumersTest.java @@ -71,7 +71,6 @@ public class NonPersistentStickyKeyDispatcherMultipleConsumersTest { private NonPersistentTopic topicMock; private NonPersistentSubscription subscriptionMock; private ServiceConfiguration configMock; - private ChannelPromise channelMock; private NonPersistentStickyKeyDispatcherMultipleConsumers nonpersistentDispatcher; @@ -100,7 +99,6 @@ public void setup() throws Exception { doReturn(brokerMock).when(topicMock).getBrokerService(); doReturn(topicName).when(topicMock).getName(); - channelMock = mock(ChannelPromise.class); subscriptionMock = mock(NonPersistentSubscription.class); PowerMockito.mockStatic(DispatchRateLimiter.class); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/DelayedDeliveryTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/DelayedDeliveryTest.java index 
cb870f8a70e81..fc94f4d72c063 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/DelayedDeliveryTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/DelayedDeliveryTest.java @@ -27,6 +27,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Random; import java.util.Set; import java.util.TreeSet; import java.util.UUID; @@ -34,6 +35,7 @@ import lombok.Cleanup; +import org.apache.bookkeeper.client.BKException; import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.service.Dispatcher; import org.apache.pulsar.client.admin.PulsarAdminException; @@ -61,6 +63,7 @@ public void setup() throws Exception { conf.setSystemTopicEnabled(true); conf.setTopicLevelPoliciesEnabled(true); conf.setDelayedDeliveryTickTimeMillis(1024); + conf.setDispatcherReadFailureBackoffInitialTimeInMs(1000); super.internalSetup(); super.producerBaseSetup(); } @@ -493,4 +496,135 @@ public void testClearDelayedMessagesWhenClearBacklog() throws PulsarClientExcept admin.topics().skipAllMessages(topic, subName); Awaitility.await().untilAsserted(() -> Assert.assertEquals(dispatcher.getNumberOfDelayedMessages(), 0)); } + + @Test + public void testDelayedDeliveryWithAllConsumersDisconnecting() throws Exception { + String topic = BrokerTestUtil.newUniqueName("persistent://public/default/testDelays"); + + Consumer c1 = pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("sub") + .subscriptionType(SubscriptionType.Shared) + .subscribe(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .create(); + + producer.newMessage() + .value("msg") + .deliverAfter(5, TimeUnit.SECONDS) + .send(); + + Dispatcher dispatcher = pulsar.getBrokerService().getTopicReference(topic).get().getSubscription("sub").getDispatcher(); + Awaitility.await().untilAsserted(() -> Assert.assertEquals(dispatcher.getNumberOfDelayedMessages(), 
1)); + + c1.close(); + + // Attach a new consumer. Since there are no consumers connected, this will trigger the cursor rewind + @Cleanup + Consumer c2 = pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("sub") + .subscriptionType(SubscriptionType.Shared) + .receiverQueueSize(1) + .subscribe(); + + Awaitility.await().untilAsserted(() -> Assert.assertEquals(dispatcher.getNumberOfDelayedMessages(), 1)); + + Message msg = c2.receive(10, TimeUnit.SECONDS); + assertNotNull(msg); + + // No more messages + msg = c2.receive(1, TimeUnit.SECONDS); + assertNull(msg); + + Awaitility.await().untilAsserted(() -> Assert.assertEquals(dispatcher.getNumberOfDelayedMessages(), 0)); + } + + @Test + public void testInterleavedMessagesOnKeySharedSubscription() throws Exception { + String topic = BrokerTestUtil.newUniqueName("testInterleavedMessagesOnKeySharedSubscription"); + + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("key-shared-sub") + .subscriptionType(SubscriptionType.Key_Shared) + .subscribe(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .create(); + + Random random = new Random(0); + for (int i = 0; i < 10; i++) { + // Publish 1 message without delay and 1 with delay + producer.newMessage() + .value("immediate-msg-" + i) + .sendAsync(); + + int delayMillis = 1000 + random.nextInt(1000); + producer.newMessage() + .value("delayed-msg-" + i) + .deliverAfter(delayMillis, TimeUnit.MILLISECONDS) + .sendAsync(); + Thread.sleep(1000); + } + + producer.flush(); + + Set receivedMessages = new HashSet<>(); + + while (receivedMessages.size() < 20) { + Message msg = consumer.receive(3, TimeUnit.SECONDS); + receivedMessages.add(msg.getValue()); + consumer.acknowledge(msg); + } + } + + @Test + public void testDispatcherReadFailure() throws Exception { + String topic = BrokerTestUtil.newUniqueName("testDispatcherReadFailure"); + + @Cleanup + Consumer 
consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("shared-sub") + .subscriptionType(SubscriptionType.Shared) + .subscribe(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .create(); + + for (int i = 0; i < 10; i++) { + producer.newMessage() + .value("msg-" + i) + .deliverAfter(5, TimeUnit.SECONDS) + .sendAsync(); + } + + producer.flush(); + + Message msg = consumer.receive(100, TimeUnit.MILLISECONDS); + assertNull(msg); + + // Inject failure in BK read + this.mockBookKeeper.failNow(BKException.Code.ReadException); + + Set receivedMsgs = new TreeSet<>(); + for (int i = 0; i < 10; i++) { + msg = consumer.receive(10, TimeUnit.SECONDS); + receivedMsgs.add(msg.getValue()); + } + + assertEquals(receivedMsgs.size(), 10); + for (int i = 0; i < 10; i++) { + assertTrue(receivedMsgs.contains("msg-" + i)); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java index bfef2b705d4ba..0e1b37b5160ae 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java @@ -30,7 +30,14 @@ import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.protocol.Commands; +import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; +import org.awaitility.Awaitility; import org.testng.annotations.Test; + +import java.lang.reflect.Field; +import java.util.Map; + +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.apache.pulsar.common.protocol.Commands.serializeMetadataAndPayload; import static org.mockito.ArgumentMatchers.any; import static 
org.mockito.ArgumentMatchers.eq; @@ -40,8 +47,11 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; +import org.apache.bookkeeper.mledger.ManagedCursor; @Slf4j @Test(groups = "broker") @@ -62,7 +72,8 @@ public void testIsDuplicate() { doReturn(serviceConfiguration).when(pulsarService).getConfiguration(); PersistentTopic persistentTopic = mock(PersistentTopic.class); ManagedLedger managedLedger = mock(ManagedLedger.class); - MessageDeduplication messageDeduplication = spy(new MessageDeduplication(pulsarService, persistentTopic, managedLedger)); + MessageDeduplication messageDeduplication = + spyWithClassAndConstructorArgs(MessageDeduplication.class, pulsarService, persistentTopic, managedLedger); doReturn(true).when(messageDeduplication).isEnabled(); String producerName1 = "producer1"; @@ -142,6 +153,79 @@ public void testIsDuplicate() { assertEquals(lastSequenceIdPushed.longValue(), 5); } + @Test + public void testInactiveProducerRemove() throws Exception { + PulsarService pulsarService = mock(PulsarService.class); + PersistentTopic topic = mock(PersistentTopic.class); + ManagedLedger managedLedger = mock(ManagedLedger.class); + + ServiceConfiguration serviceConfiguration = new ServiceConfiguration(); + serviceConfiguration.setBrokerDeduplicationEntriesInterval(BROKER_DEDUPLICATION_ENTRIES_INTERVAL); + serviceConfiguration.setBrokerDeduplicationMaxNumberOfProducers(BROKER_DEDUPLICATION_MAX_NUMBER_PRODUCERS); + serviceConfiguration.setReplicatorPrefix(REPLICATOR_PREFIX); + serviceConfiguration.setBrokerDeduplicationProducerInactivityTimeoutMinutes(1); + + doReturn(serviceConfiguration).when(pulsarService).getConfiguration(); + MessageDeduplication messageDeduplication = + 
spyWithClassAndConstructorArgs(MessageDeduplication.class, pulsarService, topic, managedLedger); + doReturn(true).when(messageDeduplication).isEnabled(); + + ManagedCursor managedCursor = mock(ManagedCursor.class); + doReturn(managedCursor).when(messageDeduplication).getManagedCursor(); + + Topic.PublishContext publishContext = mock(Topic.PublishContext.class); + + Field field = MessageDeduplication.class.getDeclaredField("inactiveProducers"); + field.setAccessible(true); + Map inactiveProducers = (Map) field.get(messageDeduplication); + + String producerName1 = "test1"; + when(publishContext.getHighestSequenceId()).thenReturn(2L); + when(publishContext.getSequenceId()).thenReturn(1L); + when(publishContext.getProducerName()).thenReturn(producerName1); + messageDeduplication.isDuplicate(publishContext, null); + + String producerName2 = "test2"; + when(publishContext.getProducerName()).thenReturn(producerName2); + messageDeduplication.isDuplicate(publishContext, null); + + String producerName3 = "test3"; + when(publishContext.getProducerName()).thenReturn(producerName3); + messageDeduplication.isDuplicate(publishContext, null); + + // All 3 are added to the inactiveProducers list + messageDeduplication.producerRemoved(producerName1); + messageDeduplication.producerRemoved(producerName2); + messageDeduplication.producerRemoved(producerName3); + + // Try first purgeInactive, all producer not inactive. 
+ messageDeduplication.purgeInactiveProducers(); + assertEquals(inactiveProducers.size(), 3); + + doReturn(false).when(messageDeduplication).isEnabled(); + inactiveProducers.put(producerName2, System.currentTimeMillis() - 80000); + inactiveProducers.put(producerName3, System.currentTimeMillis() - 80000); + messageDeduplication.purgeInactiveProducers(); + assertFalse(inactiveProducers.containsKey(producerName2)); + assertFalse(inactiveProducers.containsKey(producerName3)); + doReturn(true).when(messageDeduplication).isEnabled(); + // Modify the inactive time of produce2 and produce3 + // messageDeduplication.purgeInactiveProducers() will remove producer2 and producer3 + inactiveProducers.put(producerName2, System.currentTimeMillis() - 70000); + inactiveProducers.put(producerName3, System.currentTimeMillis() - 70000); + // Try second purgeInactive, produce2 and produce3 is inactive. + messageDeduplication.purgeInactiveProducers(); + assertFalse(inactiveProducers.containsKey(producerName2)); + assertFalse(inactiveProducers.containsKey(producerName3)); + field = MessageDeduplication.class.getDeclaredField("highestSequencedPushed"); + field.setAccessible(true); + ConcurrentOpenHashMap highestSequencedPushed = (ConcurrentOpenHashMap) field.get(messageDeduplication); + + assertEquals((long) highestSequencedPushed.get(producerName1), 2L); + assertFalse(highestSequencedPushed.containsKey(producerName2)); + assertFalse(highestSequencedPushed.containsKey(producerName3)); + } + @Test public void testIsDuplicateWithFailure() { @@ -171,7 +255,9 @@ public void testIsDuplicateWithFailure() { doReturn(eventLoopGroup).when(brokerService).executor(); doReturn(pulsarService).when(brokerService).pulsar(); - PersistentTopic persistentTopic = spy(new PersistentTopic("topic-1", brokerService, managedLedger, messageDeduplication)); + PersistentTopic persistentTopic = + spyWithClassAndConstructorArgs(PersistentTopic.class, "topic-1", + brokerService, managedLedger, messageDeduplication); 
String producerName1 = "producer1"; ByteBuf byteBuf1 = getMessage(producerName1, 0); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryControllerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryControllerTest.java index 9a785f6f95fd6..3cd6fc23de744 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryControllerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryControllerTest.java @@ -30,8 +30,8 @@ import java.util.Set; import java.util.TreeSet; import org.apache.bookkeeper.mledger.impl.PositionImpl; -import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap; -import org.apache.pulsar.common.util.collections.LongPairSet; +import org.apache.pulsar.utils.ConcurrentBitmapSortedLongPairSet; +import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @@ -48,7 +48,8 @@ public void testAddAndRemove(boolean allowOutOfOrderDelivery) throws Exception { Field messagesToRedeliverField = MessageRedeliveryController.class.getDeclaredField("messagesToRedeliver"); messagesToRedeliverField.setAccessible(true); - LongPairSet messagesToRedeliver = (LongPairSet) messagesToRedeliverField.get(controller); + ConcurrentBitmapSortedLongPairSet messagesToRedeliver = + (ConcurrentBitmapSortedLongPairSet) messagesToRedeliverField.get(controller); Field hashesToBeBlockedField = MessageRedeliveryController.class.getDeclaredField("hashesToBeBlocked"); hashesToBeBlockedField.setAccessible(true); @@ -67,9 +68,8 @@ public void testAddAndRemove(boolean allowOutOfOrderDelivery) throws Exception { assertEquals(hashesToBeBlocked.size(), 0); } - assertTrue(controller.add(1, 1)); - assertTrue(controller.add(1, 2)); - assertFalse(controller.add(1, 1)); + controller.add(1, 1); + 
controller.add(1, 2); assertFalse(controller.isEmpty()); assertEquals(messagesToRedeliver.size(), 2); @@ -81,9 +81,8 @@ public void testAddAndRemove(boolean allowOutOfOrderDelivery) throws Exception { assertFalse(hashesToBeBlocked.containsKey(1, 2)); } - assertTrue(controller.remove(1, 1)); - assertTrue(controller.remove(1, 2)); - assertFalse(controller.remove(1, 1)); + controller.remove(1, 1); + controller.remove(1, 2); assertTrue(controller.isEmpty()); assertEquals(messagesToRedeliver.size(), 0); @@ -93,10 +92,9 @@ public void testAddAndRemove(boolean allowOutOfOrderDelivery) throws Exception { assertEquals(hashesToBeBlocked.size(), 0); } - assertTrue(controller.add(2, 1, 100)); - assertTrue(controller.add(2, 2, 101)); - assertTrue(controller.add(2, 3, 101)); - assertFalse(controller.add(2, 1, 100)); + controller.add(2, 1, 100); + controller.add(2, 2, 101); + controller.add(2, 3, 101); assertFalse(controller.isEmpty()); assertEquals(messagesToRedeliver.size(), 3); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumersTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumersTest.java index 3cb5bfbdc4456..6d32a83ffc5c2 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumersTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentStickyKeyDispatcherMultipleConsumersTest.java @@ -21,6 +21,17 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelPromise; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicInteger; +import 
java.util.stream.Collectors; +import io.netty.channel.EventLoopGroup; import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.EntryImpl; @@ -118,6 +129,13 @@ public void setup() throws Exception { brokerMock = mock(BrokerService.class); doReturn(pulsarMock).when(brokerMock).pulsar(); + EventLoopGroup eventLoopGroup = mock(EventLoopGroup.class); + doReturn(eventLoopGroup).when(brokerMock).executor(); + doAnswer(invocation -> { + ((Runnable)invocation.getArguments()[0]).run(); + return null; + }).when(eventLoopGroup).execute(any(Runnable.class)); + topicMock = mock(PersistentTopic.class); doReturn(brokerMock).when(topicMock).getBrokerService(); doReturn(topicName).when(topicMock).getName(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentSubscriptionTest.java index 76f485ebb42a3..946f90a1ddd3c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentSubscriptionTest.java @@ -18,10 +18,10 @@ */ package org.apache.pulsar.broker.service.persistent; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.createMockBookKeeper; import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.createMockZooKeeper; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; @@ -40,7 +40,7 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import 
java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ExecutorService; import org.apache.bookkeeper.common.util.OrderedExecutor; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.ManagedLedger; @@ -57,7 +57,6 @@ import org.apache.pulsar.broker.resources.NamespaceResources; import org.apache.pulsar.broker.resources.PulsarResources; import org.apache.pulsar.broker.service.BrokerService; -import org.apache.pulsar.broker.service.BrokerServiceException; import org.apache.pulsar.broker.service.Consumer; import org.apache.pulsar.broker.transaction.buffer.impl.InMemTransactionBufferProvider; import org.apache.pulsar.broker.transaction.pendingack.PendingAckStore; @@ -73,20 +72,14 @@ import org.apache.pulsar.metadata.api.MetadataStore; import org.apache.pulsar.metadata.impl.ZKMetadataStore; import org.apache.pulsar.transaction.common.exception.TransactionConflictException; -import org.apache.pulsar.zookeeper.ZooKeeperCache; -import org.apache.pulsar.zookeeper.ZooKeeperDataCache; import org.apache.zookeeper.ZooKeeper; import org.awaitility.Awaitility; -import org.powermock.core.classloader.annotations.PowerMockIgnore; -import org.powermock.core.classloader.annotations.PrepareForTest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -@PrepareForTest({ ZooKeeperDataCache.class, BrokerService.class }) -@PowerMockIgnore({"org.apache.logging.log4j.*"}) @Test(groups = "broker") public class PersistentSubscriptionTest { @@ -117,10 +110,10 @@ public void setup() throws Exception { executor = OrderedExecutor.newBuilder().numThreads(1).name("persistent-subscription-test").build(); eventLoopGroup = new NioEventLoopGroup(); - ServiceConfiguration svcConfig = spy(new ServiceConfiguration()); + ServiceConfiguration svcConfig = spy(ServiceConfiguration.class); svcConfig.setBrokerShutdownTimeoutMs(0L); 
svcConfig.setTransactionCoordinatorEnabled(true); - pulsarMock = spy(new PulsarService(svcConfig)); + pulsarMock = spyWithClassAndConstructorArgs(PulsarService.class, svcConfig); PulsarResources pulsarResources = mock(PulsarResources.class); doReturn(pulsarResources).when(pulsarMock).getPulsarResources(); NamespaceResources namespaceResources = mock(NamespaceResources.class); @@ -134,7 +127,7 @@ public void setup() throws Exception { public CompletableFuture newPendingAckStore(PersistentSubscription subscription) { return CompletableFuture.completedFuture(new PendingAckStore() { @Override - public void replayAsync(PendingAckHandleImpl pendingAckHandle, ScheduledExecutorService executorService) { + public void replayAsync(PendingAckHandleImpl pendingAckHandle, ExecutorService executorService) { try { Field field = PendingAckHandleState.class.getDeclaredField("state"); field.setAccessible(true); @@ -190,7 +183,7 @@ public CompletableFuture checkInitializedBefore(PersistentSubscription doReturn(store).when(pulsarMock).getLocalMetadataStore(); doReturn(store).when(pulsarMock).getConfigurationMetadataStore(); - brokerMock = spy(new BrokerService(pulsarMock, eventLoopGroup)); + brokerMock = spyWithClassAndConstructorArgs(BrokerService.class, pulsarMock, eventLoopGroup); doNothing().when(brokerMock).unloadNamespaceBundlesGracefully(); doReturn(brokerMock).when(pulsarMock).getBrokerService(); @@ -271,7 +264,7 @@ public void testCanAcknowledgeAndCommitForTransaction() throws ExecutionExceptio } @Test - public void testCanAcknowledgeAndAbortForTransaction() throws BrokerServiceException, InterruptedException { + public void testCanAcknowledgeAndAbortForTransaction() throws Exception { List> positionsPair = new ArrayList<>(); positionsPair.add(new MutablePair<>(new PositionImpl(2, 1), 0)); positionsPair.add(new MutablePair<>(new PositionImpl(2, 3), 0)); @@ -300,7 +293,7 @@ public void testCanAcknowledgeAndAbortForTransaction() throws BrokerServiceExcep positions.add(new 
PositionImpl(1, 100)); // Cumulative ack for txn1 - persistentSubscription.transactionCumulativeAcknowledge(txnID1, positions); + persistentSubscription.transactionCumulativeAcknowledge(txnID1, positions).get(); positions.clear(); positions.add(new PositionImpl(2, 1)); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/JsonSchemaCompatibilityCheckTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/JsonSchemaCompatibilityCheckTest.java index 32a9f9e78a874..9bf0189037a3a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/JsonSchemaCompatibilityCheckTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/schema/JsonSchemaCompatibilityCheckTest.java @@ -33,7 +33,6 @@ import org.apache.pulsar.client.api.SchemaSerializationException; import org.apache.pulsar.client.api.schema.SchemaDefinition; import org.apache.pulsar.client.impl.schema.JSONSchema; -import org.apache.pulsar.client.impl.schema.SchemaInfoImpl; import org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; import org.apache.pulsar.common.protocol.schema.SchemaData; import org.apache.pulsar.common.schema.SchemaInfo; @@ -119,7 +118,7 @@ public static OldJSONSchema of(Class pojo, Map propert JsonSchemaGenerator schemaGen = new JsonSchemaGenerator(mapper); JsonSchema schema = schemaGen.generateSchema(pojo); - SchemaInfo info = SchemaInfoImpl.builder() + SchemaInfo info = SchemaInfo.builder() .name("") .properties(properties) .type(SchemaType.JSON) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/streamingdispatch/StreamingEntryReaderTests.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/streamingdispatch/StreamingEntryReaderTests.java index e58859d5f201e..217ac0f290306 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/streamingdispatch/StreamingEntryReaderTests.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/streamingdispatch/StreamingEntryReaderTests.java @@ -38,8 +38,6 @@ import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import org.powermock.core.classloader.annotations.PowerMockIgnore; -import org.powermock.core.classloader.annotations.PrepareForTest; import org.testng.annotations.Test; import java.nio.charset.Charset; @@ -66,15 +64,7 @@ /** * Tests for {@link StreamingEntryReader} */ -@PowerMockIgnore({ - "javax.management.*", - "javax.xml.parsers.*", - "com.sun.org.apache.xerces.internal.jaxp.*", - "ch.qos.logback.*", - "org.slf4j.*", - "org.apache.logging.*"}) @Test(groups = "flaky") -@PrepareForTest({ManagedLedgerImpl.class}) public class StreamingEntryReaderTests extends MockedBookKeeperTestCase { private static final Charset Encoding = Charsets.UTF_8; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java index 2f702ab89f645..1595428f32051 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ConsumerStatsTest.java @@ -20,17 +20,33 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Multimap; import com.google.common.collect.Sets; +import java.io.ByteArrayOutputStream; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.service.Subscription; +import org.apache.pulsar.broker.service.Topic; +import 
org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.broker.stats.prometheus.PrometheusMetricsGenerator; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.MessageListener; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.ProducerConsumerBase; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; -import org.apache.pulsar.common.policies.data.TopicStats; -import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ConsumerStats; +import org.apache.pulsar.common.policies.data.TopicStats; import org.apache.pulsar.common.policies.data.stats.ConsumerStatsImpl; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.testng.Assert; @@ -38,11 +54,6 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.util.Iterator; -import java.util.List; -import java.util.Set; -import java.util.concurrent.TimeUnit; - @Slf4j @Test(groups = "broker") public class ConsumerStatsTest extends ProducerConsumerBase { @@ -182,6 +193,7 @@ public void testConsumerStatsOutput() throws Exception { "msgRateOut", "msgThroughputOut", "bytesOutCounter", + "messageAckRate", "msgOutCounter", "msgRateRedeliver", "chunkedMessageRate", @@ -220,4 +232,118 @@ public void testConsumerStatsOutput() throws Exception { consumer.close(); } + + @Test + public void testPersistentTopicMessageAckRateMetricTopicLevel() throws Exception { + String topicName = "persistent://public/default/msg_ack_rate" + UUID.randomUUID(); + testMessageAckRateMetric(topicName, true); + } + + @Test + public void testPersistentTopicMessageAckRateMetricNamespaceLevel() throws Exception { + String topicName = 
"persistent://public/default/msg_ack_rate" + UUID.randomUUID(); + testMessageAckRateMetric(topicName, false); + } + + private void testMessageAckRateMetric(String topicName, boolean exposeTopicLevelMetrics) + throws Exception { + final int messages = 1000; + String subName = "test_sub"; + CountDownLatch latch = new CountDownLatch(messages); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topicName) + .enableBatching(true).batchingMaxMessages(10).create(); + + MessageListener listener = (consumer, msg) -> { + try { + consumer.acknowledge(msg); + latch.countDown(); + } catch (PulsarClientException e) { + //ignore + } + }; + @Cleanup + Consumer c1 = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName(subName) + .subscriptionType(SubscriptionType.Shared) + .messageListener(listener) + .subscribe(); + @Cleanup + Consumer c2 = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName(subName) + .subscriptionType(SubscriptionType.Shared) + .messageListener(listener) + .subscribe(); + + String namespace = TopicName.get(topicName).getNamespace(); + + for (int i = 0; i < messages; i++) { + producer.sendAsync(UUID.randomUUID().toString()); + } + producer.flush(); + + latch.await(20, TimeUnit.SECONDS); + TimeUnit.SECONDS.sleep(1); + + Topic topic = pulsar.getBrokerService().getTopic(topicName, false).get().get(); + Subscription subscription = topic.getSubscription(subName); + List consumers = subscription.getConsumers(); + Assert.assertEquals(consumers.size(), 2); + org.apache.pulsar.broker.service.Consumer consumer1 = consumers.get(0); + org.apache.pulsar.broker.service.Consumer consumer2 = consumers.get(1); + consumer1.updateRates(); + consumer2.updateRates(); + + ByteArrayOutputStream output = new ByteArrayOutputStream(); + PrometheusMetricsGenerator.generate(pulsar, exposeTopicLevelMetrics, true, true, output); + String metricStr = output.toString(); + + Multimap metricsMap = 
PrometheusMetricsTest.parseMetrics(metricStr); + Collection ackRateMetric = metricsMap.get("pulsar_consumer_msg_ack_rate"); + + Collection subAckRateMetrics = metricsMap.get("pulsar_subscription_msg_ack_rate"); + + String rateOutMetricName = exposeTopicLevelMetrics ? "pulsar_consumer_msg_rate_out" : "pulsar_rate_out"; + Collection rateOutMetric = metricsMap.get(rateOutMetricName); + Assert.assertTrue(ackRateMetric.size() > 0); + Assert.assertTrue(rateOutMetric.size() > 0); + + if (exposeTopicLevelMetrics) { + String consumer1Name = consumer1.consumerName(); + String consumer2Name = consumer2.consumerName(); + double totalAckRate = ackRateMetric.stream() + .filter(metric -> metric.tags.get("consumer_name").equals(consumer1Name) + || metric.tags.get("consumer_name").equals(consumer2Name)) + .mapToDouble(metric -> metric.value).sum(); + double totalRateOut = rateOutMetric.stream() + .filter(metric -> metric.tags.get("consumer_name").equals(consumer1Name) + || metric.tags.get("consumer_name").equals(consumer2Name)) + .mapToDouble(metric -> metric.value).sum(); + double subAckRate = subAckRateMetrics + .stream() + .filter(m -> m.tags.get("subscription").equals(subName)) + .mapToDouble(m -> m.value) + .sum(); + + Assert.assertEquals(subAckRateMetrics.size(), 1); + Assert.assertTrue(totalAckRate > 0D); + Assert.assertTrue(totalRateOut > 0D); + Assert.assertEquals(totalAckRate, subAckRate, 0.1D * totalAckRate); + Assert.assertEquals(totalAckRate, totalRateOut, totalRateOut * 0.1D); + } else { + double totalAckRate = ackRateMetric.stream() + .filter(metric -> namespace.equals(metric.tags.get("namespace"))) + .mapToDouble(metric -> metric.value).sum(); + double totalRateOut = rateOutMetric.stream() + .filter(metric -> namespace.equals(metric.tags.get("namespace"))) + .mapToDouble(metric -> metric.value).sum(); + + Assert.assertTrue(totalAckRate > 0D); + Assert.assertTrue(totalRateOut > 0D); + Assert.assertEquals(totalAckRate, totalRateOut, totalRateOut * 0.1D); + } + } } 
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ManagedCursorMetricsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ManagedCursorMetricsTest.java index 9b3b354aa28df..4648ae2fb8f4a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ManagedCursorMetricsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/ManagedCursorMetricsTest.java @@ -18,22 +18,27 @@ */ package org.apache.pulsar.broker.stats; +import java.util.List; +import java.util.concurrent.TimeUnit; import lombok.Cleanup; import org.apache.bookkeeper.client.PulsarMockLedgerHandle; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.stats.metrics.ManagedCursorMetrics; +import org.apache.pulsar.client.api.ClientBuilder; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.impl.ConsumerImpl; +import org.apache.pulsar.client.impl.PulsarTestClient; import org.apache.pulsar.common.stats.Metrics; +import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.util.List; -import java.util.concurrent.TimeUnit; - @Test(groups = "broker") public class ManagedCursorMetricsTest extends MockedPulsarServiceBaseTest { @@ -49,6 +54,12 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + @Override + protected PulsarClient createNewPulsarClient(ClientBuilder clientBuilder) throws PulsarClientException { + return PulsarTestClient.create(clientBuilder); + } + + @Test public void testManagedCursorMetrics() throws Exception { final String subName = "my-sub"; final String topicName = 
"persistent://my-namespace/use/my-ns/my-topic1"; @@ -62,14 +73,18 @@ public void testManagedCursorMetrics() throws Exception { metricsList = metrics.generate(); Assert.assertTrue(metricsList.isEmpty()); - Consumer consumer = pulsarClient.newConsumer() + PulsarTestClient pulsarClient = (PulsarTestClient) this.pulsarClient; + @Cleanup + ConsumerImpl consumer = (ConsumerImpl) this.pulsarClient.newConsumer() .topic(topicName) .subscriptionType(SubscriptionType.Shared) .ackTimeout(1, TimeUnit.SECONDS) .subscriptionName(subName) + .isAckReceiptEnabled(true) .subscribe(); - Producer producer = pulsarClient.newProducer() + @Cleanup + Producer producer = this.pulsarClient.newProducer() .topic(topicName) .create(); @@ -82,6 +97,8 @@ public void testManagedCursorMetrics() throws Exception { producer.send(message.getBytes()); consumer.acknowledge(consumer.receive().getMessageId()); } + + Awaitility.await().until(() -> pulsarClient.getConnection(topicName).get().getPendingRequests().size() == 0); metricsList = metrics.generate(); Assert.assertFalse(metricsList.isEmpty()); Assert.assertNotEquals(metricsList.get(0).getMetrics().get("brk_ml_cursor_persistLedgerSucceed"), 0L); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/PrometheusMetricsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/PrometheusMetricsTest.java index 9f098935efabf..18f7597207e30 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/PrometheusMetricsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/PrometheusMetricsTest.java @@ -50,17 +50,18 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Matcher; import java.util.regex.Pattern; import javax.crypto.SecretKey; import javax.naming.AuthenticationException; import lombok.Cleanup; -import 
org.apache.bookkeeper.client.BookKeeper; import org.apache.commons.io.IOUtils; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; +import org.apache.pulsar.broker.service.AbstractTopic; import org.apache.pulsar.broker.service.BrokerTestBase; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentMessageExpiryMonitor; @@ -74,7 +75,6 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.compaction.Compactor; -import org.apache.pulsar.compaction.TwoPhaseCompactor; import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -95,6 +95,108 @@ protected void setup() throws Exception { @Override protected void cleanup() throws Exception { super.internalCleanup(); + resetConfig(); + } + + @Test + public void testPublishRateLimitedTimes() throws Exception { + cleanup(); + checkPublishRateLimitedTimes(true); + cleanup(); + checkPublishRateLimitedTimes(false); + } + + private void checkPublishRateLimitedTimes(boolean preciseRateLimit) throws Exception { + if (preciseRateLimit) { + conf.setBrokerPublisherThrottlingTickTimeMillis(10000000); + conf.setMaxPublishRatePerTopicInMessages(1); + conf.setMaxPublishRatePerTopicInBytes(1); + conf.setBrokerPublisherThrottlingMaxMessageRate(100000); + conf.setBrokerPublisherThrottlingMaxByteRate(10000000); + } else { + conf.setBrokerPublisherThrottlingTickTimeMillis(1); + conf.setBrokerPublisherThrottlingMaxMessageRate(1); + conf.setBrokerPublisherThrottlingMaxByteRate(1); + } + conf.setStatsUpdateFrequencyInSecs(100000000); + conf.setPreciseTopicPublishRateLimiterEnable(preciseRateLimit); + setup(); + String ns1 = "prop/ns-abc1" + 
UUID.randomUUID(); + admin.namespaces().createNamespace(ns1, 1); + String topicName = "persistent://" + ns1 + "/metrics" + UUID.randomUUID(); + String topicName2 = "persistent://" + ns1 + "/metrics" + UUID.randomUUID(); + String topicName3 = "persistent://" + ns1 + "/metrics" + UUID.randomUUID(); + // Use another connection + @Cleanup + PulsarClient client2 = newPulsarClient(lookupUrl.toString(), 0); + + Producer producer = pulsarClient.newProducer().producerName("my-pub").enableBatching(false) + .topic(topicName).create(); + Producer producer2 = pulsarClient.newProducer().producerName("my-pub-2").enableBatching(false) + .topic(topicName2).create(); + Producer producer3 = client2.newProducer().producerName("my-pub-2").enableBatching(false) + .topic(topicName3).create(); + producer.sendAsync(new byte[11]); + + PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService() + .getTopic(topicName, false).get().get(); + Field field = AbstractTopic.class.getDeclaredField("publishRateLimitedTimes"); + field.setAccessible(true); + Awaitility.await().untilAsserted(() -> { + long value = (long) field.get(persistentTopic); + assertEquals(value, 1); + }); + @Cleanup + ByteArrayOutputStream statsOut = new ByteArrayOutputStream(); + PrometheusMetricsGenerator.generate(pulsar, true, false, false, statsOut); + String metricsStr = statsOut.toString(); + Multimap metrics = parseMetrics(metricsStr); + assertTrue(metrics.containsKey("pulsar_publish_rate_limit_times")); + metrics.get("pulsar_publish_rate_limit_times").forEach(item -> { + if (ns1.equals(item.tags.get("namespace"))) { + if (item.tags.get("topic").equals(topicName)) { + assertEquals(item.value, 1); + return; + } else if (item.tags.get("topic").equals(topicName2)) { + assertEquals(item.value, 1); + return; + } else if (item.tags.get("topic").equals(topicName3)) { + //When using precise rate limiting, we only trigger the rate limiting of the topic, + // so if the topic is not using the same connection, the 
rate limiting times will be 0 + //When using asynchronous rate limiting, we will trigger the broker-level rate limiting, + // and all connections will be limited at this time. + if (preciseRateLimit) { + assertEquals(item.value, 0); + } else { + assertEquals(item.value, 1); + } + return; + } + fail("should not fail"); + } + }); + // Stats updater will reset the stats + pulsar.getBrokerService().updateRates(); + Awaitility.await().untilAsserted(() -> { + long value = (long) field.get(persistentTopic); + assertEquals(value, 0); + }); + + @Cleanup + ByteArrayOutputStream statsOut2 = new ByteArrayOutputStream(); + PrometheusMetricsGenerator.generate(pulsar, true, false, false, statsOut2); + String metricsStr2 = statsOut2.toString(); + Multimap metrics2 = parseMetrics(metricsStr2); + assertTrue(metrics2.containsKey("pulsar_publish_rate_limit_times")); + metrics2.get("pulsar_publish_rate_limit_times").forEach(item -> { + if (ns1.equals(item.tags.get("namespace"))) { + assertEquals(item.value, 0); + } + }); + + producer.close(); + producer2.close(); + producer3.close(); } @Test @@ -821,6 +923,38 @@ public void testManagedLedgerBookieClientStats() throws Exception { cm = (List) metrics.get("pulsar_managedLedger_client_bookkeeper_ml_workers_task_execution_count"); assertEquals(cm.size(), 0); + + cm = (List) metrics.get( + keyNameBySubstrings(metrics, "pulsar_managedLedger_client", "bookkeeper_ml_scheduler_total_tasks")); + assertEquals(cm.size(), 1); + assertEquals(cm.get(0).tags.get("cluster"), "test"); + + cm = (List) metrics.get(keyNameBySubstrings(metrics, "pulsar_managedLedger_client", + "bookkeeper_ml_scheduler_task_execution_sum")); + assertEquals(cm.size(), 2); + assertEquals(cm.get(0).tags.get("cluster"), "test"); + + cm = (List) metrics.get( + keyNameBySubstrings(metrics, + "pulsar_managedLedger_client", "bookkeeper_ml_scheduler_queue")); + assertEquals(cm.size(), 1); + assertEquals(cm.get(0).tags.get("cluster"), "test"); + } + + private static String 
keyNameBySubstrings(Multimap metrics, String... substrings) { + for (String key : metrics.keys()) { + boolean found = true; + for (String s : substrings) { + if (!key.contains(s)) { + found = false; + break; + } + } + if (found) { + return key; + } + } + return null; } @Test @@ -1188,7 +1322,7 @@ public void testCompaction() throws Exception { } ScheduledExecutorService compactionScheduler = Executors.newSingleThreadScheduledExecutor( new ThreadFactoryBuilder().setNameFormat("compactor").setDaemon(true).build()); - Compactor compactor = pulsar.getCompactor(true); + Compactor compactor = pulsar.getCompactor(); compactor.compact(topicName).get(); statsOut = new ByteArrayOutputStream(); PrometheusMetricsGenerator.generate(pulsar, true, false, false, statsOut); @@ -1217,7 +1351,7 @@ public void testCompaction() throws Exception { assertEquals(cm.get(0).value, 10); cm = (List) metrics.get("pulsar_compaction_compacted_entries_size"); assertEquals(cm.size(), 1); - assertEquals(cm.get(0).value, 870); + assertEquals(cm.get(0).value, 840); pulsarClient.close(); } @@ -1264,6 +1398,64 @@ public void testSplitTopicAndPartitionLabel() throws Exception { consumer2.close(); } + + @Test + public void testMetricsGroupedByTypeDefinitions() throws Exception { + Producer p1 = pulsarClient.newProducer().topic("persistent://my-property/use/my-ns/my-topic1").create(); + Producer p2 = pulsarClient.newProducer().topic("persistent://my-property/use/my-ns/my-topic2").create(); + for (int i = 0; i < 10; i++) { + String message = "my-message-" + i; + p1.send(message.getBytes()); + p2.send(message.getBytes()); + } + + ByteArrayOutputStream statsOut = new ByteArrayOutputStream(); + PrometheusMetricsGenerator.generate(pulsar, false, false, false, statsOut); + String metricsStr = statsOut.toString(); + + Pattern typePattern = Pattern.compile("^#\\s+TYPE\\s+(\\w+)\\s+(\\w+)"); + Pattern metricNamePattern = Pattern.compile("^(\\w+)\\{.+"); + + AtomicReference currentMetric = new AtomicReference<>(); 
+ Splitter.on("\n").split(metricsStr).forEach(line -> { + if (line.isEmpty()) { + return; + } + if (line.startsWith("#")) { + // Get the current type definition + Matcher typeMatcher = typePattern.matcher(line); + checkArgument(typeMatcher.matches()); + String metricName = typeMatcher.group(1); + currentMetric.set(metricName); + } else { + Matcher metricMatcher = metricNamePattern.matcher(line); + checkArgument(metricMatcher.matches()); + String metricName = metricMatcher.group(1); + + if (metricName.endsWith("_bucket")) { + metricName = metricName.substring(0, metricName.indexOf("_bucket")); + } else if (metricName.endsWith("_count") && !currentMetric.get().endsWith("_count")) { + metricName = metricName.substring(0, metricName.indexOf("_count")); + } else if (metricName.endsWith("_sum") && !currentMetric.get().endsWith("_sum")) { + metricName = metricName.substring(0, metricName.indexOf("_sum")); + } else if (metricName.endsWith("_total") && !currentMetric.get().endsWith("_total")) { + metricName = metricName.substring(0, metricName.indexOf("_total")); + } else if (metricName.endsWith("_created") && !currentMetric.get().endsWith("_created")) { + metricName = metricName.substring(0, metricName.indexOf("_created")); + } + + if (!metricName.equals(currentMetric.get())) { + System.out.println(metricsStr); + fail("Metric not grouped under its type definition: " + line); + } + + } + }); + + p1.close(); + p2.close(); + } + private void compareCompactionStateCount(List cm, double count) { assertEquals(cm.size(), 1); assertEquals(cm.get(0).tags.get("cluster"), "test"); @@ -1317,9 +1509,9 @@ public static Multimap parseMetrics(String metrics) { return parsed; } - static class Metric { - Map tags = new TreeMap<>(); - double value; + public static class Metric { + public Map tags = new TreeMap<>(); + public double value; @Override public String toString() { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/TransactionMetricsTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/TransactionMetricsTest.java index cb8e4305ddf1e..0c9f877150a8e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/TransactionMetricsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/TransactionMetricsTest.java @@ -18,8 +18,14 @@ */ package org.apache.pulsar.broker.stats; +import com.google.common.base.Splitter; import com.google.common.collect.Multimap; import com.google.common.collect.Sets; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.service.BrokerTestBase; import org.apache.pulsar.broker.stats.prometheus.PrometheusMetricsGenerator; @@ -40,6 +46,7 @@ import org.apache.pulsar.transaction.coordinator.TransactionSubscription; import org.apache.pulsar.transaction.coordinator.impl.MLTransactionLogImpl; import org.awaitility.Awaitility; +import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -50,10 +57,13 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static com.google.common.base.Preconditions.checkArgument; import static org.apache.pulsar.broker.stats.PrometheusMetricsTest.parseMetrics; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; +@Slf4j public class TransactionMetricsTest extends BrokerTestBase { @BeforeMethod(alwaysRun = true) @@ -153,9 +163,11 @@ public void testTransactionCoordinatorRateMetrics() throws Exception{ for (int i = 0; i < txnCount; i++) { if (i % 2 == 0) { - pulsar.getTransactionMetadataStoreService().endTransaction(list.get(i), TxnAction.COMMIT_VALUE, false).get(); + pulsar.getTransactionMetadataStoreService().endTransaction(list.get(i), 
TxnAction.COMMIT_VALUE, + false).get(); } else { - pulsar.getTransactionMetadataStoreService().endTransaction(list.get(i), TxnAction.ABORT_VALUE, false).get(); + pulsar.getTransactionMetadataStoreService().endTransaction(list.get(i), TxnAction.ABORT_VALUE, + false).get(); } } @@ -271,6 +283,139 @@ public void testManagedLedgerMetrics() throws Exception{ assertEquals(metric.size(), 2); } + @Test + public void testManagedLedgerMetricsWhenPendingAckNotInit() throws Exception{ + String ns1 = "prop/ns-abc1"; + admin.namespaces().createNamespace(ns1); + String topic = "persistent://" + ns1 + "/testManagedLedgerMetricsWhenPendingAckNotInit"; + String subName = "test_managed_ledger_metrics"; + String subName2 = "test_pending_ack_no_init"; + admin.topics().createNonPartitionedTopic(topic); + admin.lookups().lookupTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString()); + TransactionCoordinatorID transactionCoordinatorIDOne = TransactionCoordinatorID.get(0); + pulsar.getTransactionMetadataStoreService().handleTcClientConnect(transactionCoordinatorIDOne).get(); + admin.topics().createSubscription(topic, subName, MessageId.earliest); + admin.topics().createSubscription(topic, subName2, MessageId.earliest); + + Awaitility.await().atMost(2000, TimeUnit.MILLISECONDS).until(() -> + pulsar.getTransactionMetadataStoreService().getStores().size() == 1); + + pulsarClient = PulsarClient.builder().serviceUrl(lookupUrl.toString()).enableTransaction(true).build(); + + Consumer consumer = pulsarClient.newConsumer() + .topic(topic) + .receiverQueueSize(10) + .subscriptionName(subName) + .subscriptionType(SubscriptionType.Key_Shared) + .subscribe(); + + Producer producer = pulsarClient.newProducer() + .topic(topic) + .create(); + + Transaction transaction = + pulsarClient.newTransaction().withTransactionTimeout(10, TimeUnit.SECONDS).build().get(); + producer.send("hello pulsar".getBytes()); + consumer.acknowledgeAsync(consumer.receive().getMessageId(), transaction).get(); + 
ByteArrayOutputStream statsOut = new ByteArrayOutputStream(); + PrometheusMetricsGenerator.generate(pulsar, true, false, false, statsOut); + String metricsStr = statsOut.toString(); + + Multimap metrics = parseMetrics(metricsStr); + + Collection metric = metrics.get("pulsar_storage_size"); + checkManagedLedgerMetrics(subName, 32, metric); + //No statistics of the pendingAck are generated when the pendingAck is not initialized. + for (PrometheusMetricsTest.Metric metric1 : metric) { + if (metric1.tags.containsValue(subName2)) { + Assert.fail(); + } + } + + consumer = pulsarClient.newConsumer() + .topic(topic) + .receiverQueueSize(10) + .subscriptionName(subName2) + .subscriptionType(SubscriptionType.Key_Shared) + .subscribe(); + transaction = + pulsarClient.newTransaction().withTransactionTimeout(10, TimeUnit.SECONDS).build().get(); + consumer.acknowledgeAsync(consumer.receive().getMessageId(), transaction).get(); + + statsOut = new ByteArrayOutputStream(); + PrometheusMetricsGenerator.generate(pulsar, true, false, false, statsOut); + metricsStr = statsOut.toString(); + metrics = parseMetrics(metricsStr); + metric = metrics.get("pulsar_storage_size"); + checkManagedLedgerMetrics(subName2, 32, metric); + } + + @Test + public void testDuplicateMetricTypeDefinitions() throws Exception{ + admin.lookups().lookupTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString()); + TransactionCoordinatorID transactionCoordinatorIDOne = TransactionCoordinatorID.get(0); + TransactionCoordinatorID transactionCoordinatorIDTwo = TransactionCoordinatorID.get(1); + pulsar.getTransactionMetadataStoreService().handleTcClientConnect(transactionCoordinatorIDOne); + pulsar.getTransactionMetadataStoreService().handleTcClientConnect(transactionCoordinatorIDTwo); + + Awaitility.await().until(() -> + pulsar.getTransactionMetadataStoreService().getStores().size() == 2); + pulsarClient = PulsarClient.builder().serviceUrl(lookupUrl.toString()).enableTransaction(true).build(); + Producer p1 = 
pulsarClient + .newProducer() + .topic("persistent://my-property/use/my-ns/my-topic1") + .sendTimeout(0, TimeUnit.SECONDS) + .create(); + Transaction transaction = pulsarClient + .newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + for (int i = 0; i < 10; i++) { + String message = "my-message-" + i; + p1.newMessage(transaction) + .value(message.getBytes()) + .send(); + } + ByteArrayOutputStream statsOut = new ByteArrayOutputStream(); + PrometheusMetricsGenerator.generate(pulsar, false, false, false, statsOut); + String metricsStr = statsOut.toString(); + + Map typeDefs = new HashMap<>(); + Map metricNames = new HashMap<>(); + Pattern typePattern = Pattern.compile("^#\\s+TYPE\\s+(\\w+)\\s+(\\w+)"); + + Splitter.on("\n").split(metricsStr).forEach(line -> { + if (line.isEmpty()) { + return; + } + if (line.startsWith("#")) { + // Check for duplicate type definitions + Matcher typeMatcher = typePattern.matcher(line); + checkArgument(typeMatcher.matches()); + String metricName = typeMatcher.group(1); + String type = typeMatcher.group(2); + // From https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md + // "Only one TYPE line may exist for a given metric name." + if (!typeDefs.containsKey(metricName)) { + typeDefs.put(metricName, type); + } else { + log.warn(metricsStr); + fail("Duplicate type definition found for TYPE definition " + metricName); + + } + // From https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md + // "The TYPE line for a metric name must appear before the first sample is reported for that metric name." 
+ if (metricNames.containsKey(metricName)) { + log.info(metricsStr); + fail("TYPE definition for " + metricName + " appears after first sample"); + + } + } + }); + + } + private void checkManagedLedgerMetrics(String tag, double value, Collection metrics) { boolean exist = false; for (PrometheusMetricsTest.Metric metric1 : metrics) { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricStreamsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricStreamsTest.java new file mode 100644 index 0000000000000..15c29a0dc66bb --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/prometheus/PrometheusMetricStreamsTest.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.stats.prometheus; + +import static org.testng.Assert.assertTrue; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import java.io.ByteArrayOutputStream; +import java.nio.charset.StandardCharsets; +import org.apache.pulsar.common.util.SimpleTextOutputStream; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +@Test(groups = "broker") +public class PrometheusMetricStreamsTest { + + private PrometheusMetricStreams underTest; + + @BeforeMethod(alwaysRun = true) + protected void setup() throws Exception { + underTest = new PrometheusMetricStreams(); + } + + @AfterMethod(alwaysRun = true) + protected void cleanup() throws Exception { + underTest.releaseAll(); + } + + @Test + public void canWriteSampleWithoutLabels() { + underTest.writeSample("my-metric", 123); + + String actual = writeToString(); + + assertTrue(actual.startsWith("# TYPE my-metric gauge"), "Gauge type line missing"); + assertTrue(actual.contains("my-metric{} 123"), "Metric line missing"); + } + + @Test + public void canWriteSampleWithLabels() { + underTest.writeSample("my-other-metric", 123, "cluster", "local"); + underTest.writeSample("my-other-metric", 456, "cluster", "local", "namespace", "my-ns"); + + String actual = writeToString(); + + assertTrue(actual.startsWith("# TYPE my-other-metric gauge"), "Gauge type line missing"); + assertTrue(actual.contains("my-other-metric{cluster=\"local\"} 123"), "Cluster metric line missing"); + assertTrue(actual.contains("my-other-metric{cluster=\"local\",namespace=\"my-ns\"} 456"), + "Cluster and Namespace metric line missing"); + } + + private String writeToString() { + ByteBuf buffer = ByteBufAllocator.DEFAULT.directBuffer(); + try { + SimpleTextOutputStream stream = new SimpleTextOutputStream(buffer); + underTest.flushAllToStream(stream); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + int readIndex = 
buffer.readerIndex(); + int readableBytes = buffer.readableBytes(); + for (int i = 0; i < readableBytes; i++) { + out.write(buffer.getByte(readIndex + i)); + } + return out.toString(); + } finally { + buffer.release(); + } + } +} \ No newline at end of file diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/NamespaceEventsSystemTopicServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/NamespaceEventsSystemTopicServiceTest.java index b524e1a2148c6..12f9ea26029d6 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/NamespaceEventsSystemTopicServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/NamespaceEventsSystemTopicServiceTest.java @@ -18,10 +18,19 @@ */ package org.apache.pulsar.broker.systopic; +import static org.mockito.Mockito.mock; import com.google.common.collect.Sets; +import lombok.Cleanup; +import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.service.BrokerService; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.broker.service.persistent.SystemTopic; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Reader; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.events.ActionType; import org.apache.pulsar.common.events.EventType; import org.apache.pulsar.common.events.EventsTopicNames; @@ -31,6 +40,7 @@ import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.ClusterDataImpl; +import org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import 
org.apache.pulsar.common.policies.data.TopicPolicies; import org.slf4j.Logger; @@ -64,6 +74,35 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + @Test + public void testSchemaCompatibility() throws Exception { + TopicPoliciesSystemTopicClient systemTopicClientForNamespace1 = systemTopicFactory + .createTopicPoliciesSystemTopicClient(NamespaceName.get(NAMESPACE1)); + String topicName = systemTopicClientForNamespace1.getTopicName().toString(); + @Cleanup + Reader reader = pulsarClient.newReader(Schema.BYTES) + .topic(topicName) + .startMessageId(MessageId.earliest) + .create(); + + PersistentTopic topic = + (PersistentTopic) pulsar.getBrokerService() + .getTopic(topicName, false) + .join().get(); + + Assert.assertEquals(SchemaCompatibilityStrategy.ALWAYS_COMPATIBLE, topic.getSchemaCompatibilityStrategy()); + } + + @Test + public void testSystemTopicSchemaCompatibility() throws Exception { + TopicPoliciesSystemTopicClient systemTopicClientForNamespace1 = systemTopicFactory + .createTopicPoliciesSystemTopicClient(NamespaceName.get(NAMESPACE1)); + String topicName = systemTopicClientForNamespace1.getTopicName().toString(); + SystemTopic topic = new SystemTopic(topicName, mock(ManagedLedger.class), pulsar.getBrokerService()); + + Assert.assertEquals(SchemaCompatibilityStrategy.ALWAYS_COMPATIBLE, topic.getSchemaCompatibilityStrategy()); + } + @Test public void testSendAndReceiveNamespaceEvents() throws Exception { TopicPoliciesSystemTopicClient systemTopicClientForNamespace1 = systemTopicFactory @@ -118,9 +157,9 @@ public void checkSystemTopic() throws PulsarAdminException { admin.topics().createPartitionedTopic(normalTopic, 3); TopicName systemTopicName = TopicName.get(systemTopic); TopicName normalTopicName = TopicName.get(normalTopic); - - Assert.assertEquals(SystemTopicClient.isSystemTopic(systemTopicName), true); - Assert.assertEquals(SystemTopicClient.isSystemTopic(normalTopicName), false); + BrokerService brokerService = 
pulsar.getBrokerService(); + Assert.assertEquals(brokerService.isSystemTopic(systemTopicName), true); + Assert.assertEquals(brokerService.isSystemTopic(normalTopicName), false); } private void prepareData() throws PulsarAdminException { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/PartitionedSystemTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/PartitionedSystemTopicTest.java index bd4ef7870da9f..cf45d614f0fb4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/PartitionedSystemTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/PartitionedSystemTopicTest.java @@ -18,13 +18,46 @@ */ package org.apache.pulsar.broker.systopic; +import com.google.common.collect.Sets; +import lombok.Cleanup; +import org.apache.bookkeeper.mledger.LedgerOffloader; +import org.apache.bookkeeper.mledger.ManagedLedgerConfig; +import org.apache.bookkeeper.mledger.impl.NullLedgerOffloader; +import org.apache.commons.lang.RandomStringUtils; +import org.apache.pulsar.broker.admin.impl.BrokersBase; +import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.service.BrokerTestBase; +import org.apache.pulsar.broker.service.Topic; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionInitialPosition; +import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.common.events.EventsTopicNames; import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.policies.data.BacklogQuota; +import org.apache.pulsar.common.policies.data.TenantInfo; +import org.apache.pulsar.client.api.MessageId; +import 
org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.naming.TopicVersion; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.util.FutureUtil; +import org.awaitility.Awaitility; +import org.mockito.Mockito; +import org.powermock.reflect.Whitebox; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; @Test(groups = "broker") public class PartitionedSystemTopicTest extends BrokerTestBase { @@ -38,6 +71,8 @@ protected void setup() throws Exception { conf.setAllowAutoTopicCreation(false); conf.setAllowAutoTopicCreationType("partitioned"); conf.setDefaultNumPartitions(PARTITIONS); + conf.setManagedLedgerMaxEntriesPerLedger(1); + conf.setBrokerDeleteInactiveTopicsEnabled(false); conf.setSystemTopicEnabled(true); conf.setTopicLevelPoliciesEnabled(true); @@ -65,5 +100,138 @@ public void testAutoCreatedPartitionedSystemTopic() throws Exception { Assert.assertEquals(admin.topics().getPartitionedTopicList(ns).size(), 1); Assert.assertEquals(partitions, PARTITIONS); Assert.assertEquals(admin.topics().getList(ns).size(), PARTITIONS); + reader.close(); + } + + @Test(timeOut = 1000 * 60) + public void testConsumerCreationWhenEnablingTopicPolicy() throws Exception { + String tenant = "tenant-" + RandomStringUtils.randomAlphabetic(4).toLowerCase(); + admin.tenants().createTenant(tenant, new TenantInfoImpl(Sets.newHashSet(), Sets.newHashSet("test"))); + int namespaceCount = 30; + for (int i = 0; i < namespaceCount; i++) { + String ns = tenant + "/ns-" + i; + admin.namespaces().createNamespace(ns, 4); + String topic = ns + "/t1"; + admin.topics().createPartitionedTopic(topic, 2); + } + + List>> futureList = new 
ArrayList<>(); + for (int i = 0; i < namespaceCount; i++) { + String topic = tenant + "/ns-" + i + "/t1"; + futureList.add(pulsarClient.newConsumer() + .topic(topic) + .subscriptionName("sub") + .subscribeAsync()); + } + FutureUtil.waitForAll(futureList).get(); + // Close all the consumers after check + for (CompletableFuture> consumer : futureList) { + consumer.join().close(); + } + } + + @Test + public void testProduceAndConsumeUnderSystemNamespace() throws Exception { + TenantInfo tenantInfo = TenantInfo + .builder() + .adminRoles(Sets.newHashSet("admin")) + .allowedClusters(Sets.newHashSet("test")) + .build(); + admin.tenants().createTenant("pulsar", tenantInfo); + admin.namespaces().createNamespace("pulsar/system", 2); + @Cleanup + Producer producer = pulsarClient.newProducer().topic("pulsar/system/__topic-1").create(); + producer.send("test".getBytes(StandardCharsets.UTF_8)); + @Cleanup + Consumer consumer = pulsarClient + .newConsumer() + .topic("pulsar/system/__topic-1") + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionName("sub1") + .subscriptionType(SubscriptionType.Shared) + .subscribe(); + Message receive = consumer.receive(5, TimeUnit.SECONDS); + Assert.assertNotNull(receive); + } + + @Test + public void testHealthCheckTopicNotOffload() throws Exception { + NamespaceName namespaceName = NamespaceService.getHeartbeatNamespaceV2(pulsar.getAdvertisedAddress(), + pulsar.getConfig()); + TopicName topicName = TopicName.get("persistent", namespaceName, BrokersBase.HEALTH_CHECK_TOPIC_SUFFIX); + PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService() + .getTopic(topicName.toString(), true).get().get(); + ManagedLedgerConfig config = persistentTopic.getManagedLedger().getConfig(); + config.setLedgerOffloader(NullLedgerOffloader.INSTANCE); + admin.brokers().healthcheck(TopicVersion.V2); + admin.topics().triggerOffload(topicName.toString(), MessageId.earliest); + Awaitility.await().untilAsserted(() -> { + 
Assert.assertEquals(persistentTopic.getManagedLedger().getOffloadedSize(), 0); + }); + LedgerOffloader ledgerOffloader = Mockito.mock(LedgerOffloader.class); + config.setLedgerOffloader(ledgerOffloader); + Assert.assertEquals(config.getLedgerOffloader(), ledgerOffloader); + admin.topicPolicies().setMaxConsumers(topicName.toString(), 2); + Awaitility.await().pollDelay(5, TimeUnit.SECONDS).untilAsserted(() -> { + Assert.assertEquals(persistentTopic.getManagedLedger().getConfig().getLedgerOffloader(), + NullLedgerOffloader.INSTANCE); + }); + } + + @Test + private void testSetBacklogCausedCreatingProducerFailure() throws Exception { + final String ns = "prop/ns-test"; + final String topic = ns + "/topic-1"; + + admin.namespaces().createNamespace(ns, 2); + admin.topics().createPartitionedTopic(String.format("persistent://%s", topic), 1); + BacklogQuota quota = BacklogQuota.builder() + .limitTime(2) + .limitSize(-1) + .retentionPolicy(BacklogQuota.RetentionPolicy.producer_exception) + .build(); + admin.namespaces().setBacklogQuota(ns, quota, BacklogQuota.BacklogQuotaType.message_age); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .create(); + + String partition0 = TopicName.get(String.format("persistent://%s", topic)).getPartition(0).toString(); + Optional topicReference = pulsar.getBrokerService().getTopicReference(partition0); + Assert.assertTrue(topicReference.isPresent()); + PersistentTopic persistentTopic = (PersistentTopic) topicReference.get(); + ManagedLedgerConfig config = persistentTopic.getManagedLedger().getConfig(); + config.setMinimumRolloverTime(1, TimeUnit.SECONDS); + config.setMaximumRolloverTime(1, TimeUnit.SECONDS); + persistentTopic.getManagedLedger().setConfig(config); + Whitebox.invokeMethod(persistentTopic.getManagedLedger(), "updateLastLedgerCreatedTimeAndScheduleRolloverTask"); + String msg1 = "msg-1"; + producer.send(msg1); + Thread.sleep(3 * 1000); + + Consumer consumer2 = 
pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("sub-1") + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionType(SubscriptionType.Key_Shared) + .subscribe(); + + Message receive = consumer2.receive(); + consumer2.acknowledge(receive); + + Thread.sleep(3 * 1000); + + try { + Producer producerN = PulsarClient.builder() + .maxBackoffInterval(3, TimeUnit.SECONDS) + .operationTimeout(5, TimeUnit.SECONDS) + .serviceUrl(lookupUrl.toString()).connectionTimeout(2, TimeUnit.SECONDS).build() + .newProducer(Schema.STRING).topic(topic).sendTimeout(3, TimeUnit.SECONDS).create(); + Assert.assertTrue(producerN.isConnected()); + producerN.close(); + } catch (Exception ex) { + Assert.fail("failed to create producer"); + } } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/SystemTopicClientTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/SystemTopicClientTest.java deleted file mode 100644 index d6790b7ec8269..0000000000000 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/systopic/SystemTopicClientTest.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.pulsar.broker.systopic; - -import static org.testng.Assert.assertFalse; -import static org.testng.Assert.assertTrue; -import org.apache.pulsar.common.naming.TopicName; -import org.testng.annotations.Test; - -public class SystemTopicClientTest { - - @Test - public void testIsSystemTopic() { - assertFalse(SystemTopicClient.isSystemTopic(TopicName.get("test"))); - assertFalse(SystemTopicClient.isSystemTopic(TopicName.get("public/default/test"))); - assertFalse(SystemTopicClient.isSystemTopic(TopicName.get("persistent://public/default/test"))); - assertFalse(SystemTopicClient.isSystemTopic(TopicName.get("non-persistent://public/default/test"))); - - assertTrue(SystemTopicClient.isSystemTopic(TopicName.get("__change_events"))); - assertTrue(SystemTopicClient.isSystemTopic(TopicName.get("__change_events-partition-0"))); - assertTrue(SystemTopicClient.isSystemTopic(TopicName.get("__change_events-partition-1"))); - assertTrue(SystemTopicClient.isSystemTopic(TopicName.get("__transaction_buffer_snapshot"))); - assertTrue(SystemTopicClient.isSystemTopic(TopicName.get("__transaction_buffer_snapshot-partition-0"))); - assertTrue(SystemTopicClient.isSystemTopic(TopicName.get("__transaction_buffer_snapshot-partition-1"))); - assertTrue(SystemTopicClient.isSystemTopic(TopicName - .get("topicxxx-partition-0-multiTopicsReader-f433329d68__transaction_pending_ack"))); - assertTrue(SystemTopicClient.isSystemTopic( - TopicName.get("topicxxx-multiTopicsReader-f433329d68__transaction_pending_ack"))); - - } -} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TopicTransactionBufferRecoverTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TopicTransactionBufferRecoverTest.java index 3607b45a64a28..fb2968acd885a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TopicTransactionBufferRecoverTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TopicTransactionBufferRecoverTest.java @@ -18,8 +18,16 @@ */ package org.apache.pulsar.broker.transaction; -import com.google.common.collect.Sets; - +import static org.apache.pulsar.common.events.EventsTopicNames.TRANSACTION_BUFFER_SNAPSHOT; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; import java.io.IOException; import java.lang.reflect.Field; import java.lang.reflect.Method; @@ -35,10 +43,15 @@ import org.apache.bookkeeper.mledger.proto.MLDataFormats; import org.apache.commons.collections4.map.LinkedMap; import org.apache.commons.lang3.RandomUtils; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.service.AbstractTopic; import org.apache.pulsar.broker.service.BrokerService; +import org.apache.pulsar.broker.service.BrokerServiceException; import org.apache.pulsar.broker.service.Topic; +import org.apache.pulsar.broker.service.TransactionBufferSnapshotService; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.systopic.NamespaceEventsSystemTopicFactory; +import org.apache.pulsar.broker.systopic.SystemTopicClient; import org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBuffer; import org.apache.pulsar.broker.transaction.buffer.matadata.TransactionBufferSnapshot; import org.apache.pulsar.client.api.Consumer; @@ -46,6 +59,7 @@ import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; import 
org.apache.pulsar.client.api.Reader; import org.apache.pulsar.client.api.ReaderBuilder; import org.apache.pulsar.client.api.Schema; @@ -55,28 +69,20 @@ import org.apache.pulsar.client.impl.transaction.TransactionImpl; import org.apache.pulsar.common.events.EventType; import org.apache.pulsar.common.events.EventsTopicNames; -import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.policies.data.TopicStats; +import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.awaitility.Awaitility; +import org.powermock.reflect.Whitebox; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertFalse; -import static org.testng.Assert.assertNotNull; -import static org.testng.Assert.assertNull; -import static org.testng.Assert.assertTrue; - @Slf4j public class TopicTransactionBufferRecoverTest extends TransactionTestBase { - private static final String TENANT = "tnx"; - private static final String NAMESPACE1 = TENANT + "/ns1"; private static final String RECOVER_COMMIT = NAMESPACE1 + "/recover-commit"; private static final String RECOVER_ABORT = NAMESPACE1 + "/recover-abort"; private static final String SUBSCRIPTION_NAME = "test-recover"; @@ -85,36 +91,9 @@ public class TopicTransactionBufferRecoverTest extends TransactionTestBase { private static final int NUM_PARTITIONS = 16; @BeforeMethod protected void setup() throws Exception { - setBrokerCount(1); - internalSetup(); - - String[] brokerServiceUrlArr = getPulsarServiceList().get(0).getBrokerServiceUrl().split(":"); - String webServicePort = 
brokerServiceUrlArr[brokerServiceUrlArr.length -1]; - admin.clusters().createCluster(CLUSTER_NAME, ClusterData.builder().serviceUrl("http://localhost:" + webServicePort).build()); - admin.tenants().createTenant(TENANT, - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NAMESPACE1); - admin.topics().createNonPartitionedTopic(RECOVER_COMMIT); + setUpBase(1, NUM_PARTITIONS, RECOVER_COMMIT, 0); admin.topics().createNonPartitionedTopic(RECOVER_ABORT); admin.topics().createNonPartitionedTopic(TAKE_SNAPSHOT); - - admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); - admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), NUM_PARTITIONS); - - if (pulsarClient != null) { - pulsarClient.shutdown(); - } - pulsarClient = PulsarClient.builder() - .serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()) - .statsInterval(0, TimeUnit.SECONDS) - .enableTransaction(true) - .build(); - - - // wait tc init success to ready state - waitForCoordinatorToBeAvailable(NUM_PARTITIONS); } @AfterMethod(alwaysRun = true) @@ -476,4 +455,173 @@ private void checkSnapshotCount(TopicName topicName, boolean hasSnapshot, reader.close(); } + + @Test(timeOut=30000) + public void testTransactionBufferRecoverThrowException() throws Exception { + String topic = NAMESPACE1 + "/testTransactionBufferRecoverThrowPulsarClientException"; + @Cleanup + Producer producer = pulsarClient + .newProducer() + .topic(topic) + .sendTimeout(0, TimeUnit.SECONDS) + .create(); + + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build().get(); + + producer.newMessage(txn).value("test".getBytes()).sendAsync(); + producer.newMessage(txn).value("test".getBytes()).sendAsync(); + 
txn.commit().get(); + + producer.close(); + + PersistentTopic originalTopic = (PersistentTopic) getPulsarServiceList().get(0) + .getBrokerService().getTopic(TopicName.get(topic).toString(), false).get().get(); + TransactionBufferSnapshotService transactionBufferSnapshotService = + mock(TransactionBufferSnapshotService.class); + SystemTopicClient.Reader reader = mock(SystemTopicClient.Reader.class); + SystemTopicClient.Writer writer = mock(SystemTopicClient.Writer.class); + + doReturn(CompletableFuture.completedFuture(reader)).when(transactionBufferSnapshotService).createReader(any()); + doReturn(CompletableFuture.completedFuture(writer)).when(transactionBufferSnapshotService).createWriter(any()); + doReturn(CompletableFuture.completedFuture(null)).when(reader).closeAsync(); + doReturn(CompletableFuture.completedFuture(null)).when(writer).closeAsync(); + Field field = PulsarService.class.getDeclaredField("transactionBufferSnapshotService"); + field.setAccessible(true); + TransactionBufferSnapshotService transactionBufferSnapshotServiceOriginal = + (TransactionBufferSnapshotService) field.get(getPulsarServiceList().get(0)); + // mock reader can't read snapshot fail throw RuntimeException + doThrow(new RuntimeException("test")).when(reader).hasMoreEvents(); + // check reader close topic + checkCloseTopic(pulsarClient, transactionBufferSnapshotServiceOriginal, + transactionBufferSnapshotService, originalTopic, field); + doReturn(true).when(reader).hasMoreEvents(); + + // mock reader can't read snapshot fail throw PulsarClientException + doThrow(new PulsarClientException("test")).when(reader).hasMoreEvents(); + // check reader close topic + checkCloseTopic(pulsarClient, transactionBufferSnapshotServiceOriginal, + transactionBufferSnapshotService, originalTopic, field); + doReturn(true).when(reader).hasMoreEvents(); + + // mock create reader fail + doReturn(FutureUtil.failedFuture(new PulsarClientException("test"))) + 
.when(transactionBufferSnapshotService).createReader(any()); + // check create reader fail close topic + originalTopic = (PersistentTopic) getPulsarServiceList().get(0) + .getBrokerService().getTopic(TopicName.get(topic).toString(), false).get().get(); + checkCloseTopic(pulsarClient, transactionBufferSnapshotServiceOriginal, + transactionBufferSnapshotService, originalTopic, field); + doReturn(CompletableFuture.completedFuture(reader)).when(transactionBufferSnapshotService).createReader(any()); + + // check create writer fail close topic + originalTopic = (PersistentTopic) getPulsarServiceList().get(0) + .getBrokerService().getTopic(TopicName.get(topic).toString(), false).get().get(); + // mock create writer fail + doReturn(FutureUtil.failedFuture(new PulsarClientException("test"))) + .when(transactionBufferSnapshotService).createWriter(any()); + checkCloseTopic(pulsarClient, transactionBufferSnapshotServiceOriginal, + transactionBufferSnapshotService, originalTopic, field); + + } + + private void checkCloseTopic(PulsarClient pulsarClient, + TransactionBufferSnapshotService transactionBufferSnapshotServiceOriginal, + TransactionBufferSnapshotService transactionBufferSnapshotService, + PersistentTopic originalTopic, + Field field) throws Exception { + field.set(getPulsarServiceList().get(0), transactionBufferSnapshotService); + + // recover again will throw then close topic + new TopicTransactionBuffer(originalTopic); + Awaitility.await().untilAsserted(() -> { + // isFenced means closed + Field close = AbstractTopic.class.getDeclaredField("isFenced"); + close.setAccessible(true); + assertTrue((boolean) close.get(originalTopic)); + }); + + field.set(getPulsarServiceList().get(0), transactionBufferSnapshotServiceOriginal); + + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build().get(); + + @Cleanup + Producer producer = pulsarClient + .newProducer() + .topic(originalTopic.getName()) + .sendTimeout(0, 
TimeUnit.SECONDS) + .create(); + producer.newMessage(txn).value("test".getBytes()).sendAsync(); + txn.commit().get(); + producer.close(); + } + + + @Test + public void testTransactionBufferNoSnapshotCloseReader() throws Exception{ + String topic = NAMESPACE1 + "/test"; + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING).producerName("testTxnTimeOut_producer") + .topic(topic).sendTimeout(0, TimeUnit.SECONDS).enableBatching(false).create(); + + admin.topics().unload(topic); + + // unload success, all readers have been closed except for the compaction sub + producer.send("test"); + TopicStats stats = admin.topics().getStats(NAMESPACE1 + "/" + TRANSACTION_BUFFER_SNAPSHOT); + + // except for the compaction sub + assertEquals(stats.getSubscriptions().size(), 1); + assertTrue(stats.getSubscriptions().keySet().contains("__compaction")); + } + + @Test + public void transactionBufferRecoverFailRemoveProducerFuture() throws Exception { + String topic = NAMESPACE1 + "/transactionBufferRecoverFailRemoveProducerFuture"; + + @Cleanup + Producer producer = pulsarClient + .newProducer() + .topic(topic) + .sendTimeout(0, TimeUnit.SECONDS) + .create(); + + + // txn buffer init success + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build().get(); + producer.newMessage(txn).value("test".getBytes()).sendAsync(); + producer.newMessage(txn).value("test".getBytes()).sendAsync(); + txn.commit().get(); + + PersistentTopic originalTopic = (PersistentTopic) getPulsarServiceList().get(0) + .getBrokerService().getTopic(TopicName.get(topic).toString(), false).get().get(); + TopicTransactionBuffer topicTransactionBuffer = (TopicTransactionBuffer) originalTopic.getTransactionBuffer(); + + CompletableFuture bufferFuture = new CompletableFuture<>(); + bufferFuture.completeExceptionally(new BrokerServiceException.ServiceUnitNotReadyException("test")); + + // set fail future to topic transaction buffer + 
Whitebox.setInternalState(topicTransactionBuffer, "transactionBufferFuture", bufferFuture); + + originalTopic.getProducers().get(originalTopic.getProducers().keySet().toArray()[0]).disconnect().get(); + // client producer has been close and reconnect + Awaitility.await().untilAsserted(() -> assertFalse(producer.isConnected())); + // producer is dis connect + Awaitility.await().until(() -> !producer.isConnected()); + // client producer can't reconnect to broker + Awaitility.await().during(5, TimeUnit.SECONDS).until(() -> !producer.isConnected()); + + // recover the buffer future + bufferFuture = new CompletableFuture<>(); + bufferFuture.complete(null); + Whitebox.setInternalState(topicTransactionBuffer, "transactionBufferFuture", bufferFuture); + + // client producer can't connect to broker + Awaitility.await().until(producer::isConnected); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionClientReconnectTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionClientReconnectTest.java deleted file mode 100644 index 1f5ab15cf42be..0000000000000 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionClientReconnectTest.java +++ /dev/null @@ -1,266 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.pulsar.broker.transaction; - -import com.google.common.collect.Sets; -import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; -import org.apache.pulsar.broker.TransactionMetadataStoreService; -import org.apache.pulsar.client.api.MessageId; -import org.apache.pulsar.client.api.PulsarClient; -import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClientException; -import org.apache.pulsar.client.api.transaction.TxnID; -import org.apache.pulsar.client.impl.PulsarClientImpl; -import org.apache.pulsar.client.impl.transaction.TransactionCoordinatorClientImpl; -import org.apache.pulsar.common.naming.NamespaceName; -import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; -import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; -import org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStore; -import org.awaitility.Awaitility; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - -import java.lang.reflect.Field; -import java.util.Collections; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; - -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertTrue; -import static org.testng.FileAssert.fail; - -public class TransactionClientReconnectTest extends TransactionTestBase { - - private static final String RECONNECT_TOPIC = "persistent://public/txn/txn-client-reconnect-test"; - private static final int NUM_PARTITIONS = 1; - @BeforeMethod(alwaysRun = true) - public void setup() throws Exception { - setBrokerCount(1); - super.internalSetup(); - - String[] brokerServiceUrlArr = getPulsarServiceList().get(0).getBrokerServiceUrl().split(":"); 
- String webServicePort = brokerServiceUrlArr[brokerServiceUrlArr.length -1]; - admin.clusters().createCluster(CLUSTER_NAME, ClusterData.builder().serviceUrl("http://localhost:" + webServicePort).build()); - admin.tenants().createTenant("public", - new TenantInfoImpl(Sets.newHashSet(), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace("public/txn", 10); - admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); - admin.topics().createNonPartitionedTopic(RECONNECT_TOPIC); - admin.topics().createSubscription(RECONNECT_TOPIC, "test", MessageId.latest); - admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), NUM_PARTITIONS); - - pulsarClient = PulsarClient.builder() - .serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()) - .statsInterval(0, TimeUnit.SECONDS) - .enableTransaction(true) - .build(); - // wait tc init success to ready state - waitForCoordinatorToBeAvailable(NUM_PARTITIONS); - } - - @AfterMethod(alwaysRun = true) - protected void cleanup() { - super.internalCleanup(); - } - - @Test - public void testTransactionNewReconnect() throws Exception { - start(); - - // when throw CoordinatorNotFoundException client will reconnect tc - try { - pulsarClient.newTransaction() - .withTransactionTimeout(200, TimeUnit.MILLISECONDS).build().get(); - fail(); - } catch (ExecutionException e) { - assertTrue(e.getCause() instanceof TransactionCoordinatorClientException.CoordinatorNotFoundException); - } - reconnect(); - - fence(getPulsarServiceList().get(0).getTransactionMetadataStoreService()); - - // tc fence will remove this tc and reopen - try { - pulsarClient.newTransaction() - .withTransactionTimeout(200, TimeUnit.MILLISECONDS).build().get(); - fail(); - } catch (ExecutionException e) { - assertEquals(e.getCause().getMessage(), - 
"org.apache.bookkeeper.mledger.ManagedLedgerException$ManagedLedgerFencedException: " + - "java.lang.Exception: Attempted to use a fenced managed ledger"); - } - - reconnect(); - } - - @Test - public void testTransactionAddSubscriptionToTxnAsyncReconnect() throws Exception { - TransactionCoordinatorClientImpl transactionCoordinatorClient = ((PulsarClientImpl) pulsarClient).getTcClient(); - start(); - - try { - transactionCoordinatorClient.addSubscriptionToTxnAsync(new TxnID(0, 0), "test", "test").get(); - fail(); - } catch (ExecutionException e) { - assertTrue(e.getCause() instanceof TransactionCoordinatorClientException.CoordinatorNotFoundException); - } - - reconnect(); - fence(getPulsarServiceList().get(0).getTransactionMetadataStoreService()); - try { - transactionCoordinatorClient.addSubscriptionToTxnAsync(new TxnID(0, 0), "test", "test").get(); - fail(); - } catch (ExecutionException e) { - if (e.getCause() instanceof TransactionCoordinatorClientException.TransactionNotFoundException) { - assertEquals(e.getCause().getMessage(), "The transaction with this txdID `(0,0)`not found "); - } else { - assertEquals(e.getCause().getMessage(), "java.lang.Exception: Attempted to use a fenced managed ledger"); - } - } - reconnect(); - } - - @Test - public void testTransactionAbortToTxnAsyncReconnect() throws Exception { - TransactionCoordinatorClientImpl transactionCoordinatorClient = ((PulsarClientImpl) pulsarClient).getTcClient(); - start(); - - try { - transactionCoordinatorClient.abortAsync(new TxnID(0, 0)).get(); - fail(); - } catch (ExecutionException e) { - assertTrue(e.getCause() instanceof TransactionCoordinatorClientException.CoordinatorNotFoundException); - } - - reconnect(); - fence(getPulsarServiceList().get(0).getTransactionMetadataStoreService()); - try { - transactionCoordinatorClient.abortAsync(new TxnID(0, 0)).get(); - fail(); - } catch (ExecutionException e) { - if (e.getCause() instanceof 
TransactionCoordinatorClientException.TransactionNotFoundException) { - assertEquals(e.getCause().getMessage(), "The transaction with this txdID `(0,0)`not found "); - } else { - assertEquals(e.getCause().getMessage(), "java.lang.Exception: Attempted to use a fenced managed ledger"); - } - } - reconnect(); - } - - @Test - public void testTransactionCommitToTxnAsyncReconnect() throws Exception { - TransactionCoordinatorClientImpl transactionCoordinatorClient = ((PulsarClientImpl) pulsarClient).getTcClient(); - start(); - - try { - transactionCoordinatorClient.commitAsync(new TxnID(0, 0)).get(); - fail(); - } catch (ExecutionException e) { - assertTrue(e.getCause() instanceof TransactionCoordinatorClientException.CoordinatorNotFoundException); - } - - reconnect(); - fence(getPulsarServiceList().get(0).getTransactionMetadataStoreService()); - try { - transactionCoordinatorClient.commitAsync(new TxnID(0, 0)).get(); - fail(); - } catch (ExecutionException e) { - if (e.getCause() instanceof TransactionCoordinatorClientException.TransactionNotFoundException) { - assertEquals(e.getCause().getMessage(), "The transaction with this txdID `(0,0)`not found "); - } else { - assertEquals(e.getCause().getMessage(), "java.lang.Exception: Attempted to use a fenced managed ledger"); - } - } - reconnect(); - } - - @Test - public void testTransactionAddPublishPartitionToTxnReconnect() throws Exception { - TransactionCoordinatorClientImpl transactionCoordinatorClient = ((PulsarClientImpl) pulsarClient).getTcClient(); - start(); - - try { - transactionCoordinatorClient.addPublishPartitionToTxnAsync(new TxnID(0, 0), - Collections.singletonList("test")).get(); - fail(); - } catch (ExecutionException e) { - assertTrue(e.getCause() instanceof TransactionCoordinatorClientException.CoordinatorNotFoundException); - } - - reconnect(); - fence(getPulsarServiceList().get(0).getTransactionMetadataStoreService()); - try { - transactionCoordinatorClient.addPublishPartitionToTxnAsync(new TxnID(0, 0), 
- Collections.singletonList("test")).get(); - fail(); - } catch (ExecutionException e) { - if (e.getCause() instanceof TransactionCoordinatorClientException.TransactionNotFoundException) { - assertEquals(e.getCause().getMessage(), "The transaction with this txdID `(0,0)`not found "); - } else { - assertEquals(e.getCause().getMessage(), "java.lang.Exception: Attempted to use a fenced managed ledger"); - } - } - reconnect(); - } - - public void start() throws Exception { - // wait transaction coordinator init success - Awaitility.await().until(() -> { - try { - pulsarClient.newTransaction() - .withTransactionTimeout(200, TimeUnit.MILLISECONDS).build().get(); - } catch (Exception e) { - return false; - } - return true; - }); - pulsarClient.newTransaction() - .withTransactionTimeout(200, TimeUnit.MILLISECONDS).build().get(); - - TransactionMetadataStoreService transactionMetadataStoreService = - getPulsarServiceList().get(0).getTransactionMetadataStoreService(); - // remove transaction metadata store - transactionMetadataStoreService.removeTransactionMetadataStore(TransactionCoordinatorID.get(0)).get(); - - } - - public void fence(TransactionMetadataStoreService transactionMetadataStoreService) throws Exception { - Field field = ManagedLedgerImpl.class.getDeclaredField("state"); - field.setAccessible(true); - field.set(((MLTransactionMetadataStore) transactionMetadataStoreService.getStores() - .get(TransactionCoordinatorID.get(0))).getManagedLedger(), ManagedLedgerImpl.State.Fenced); - } - - public void reconnect() { - //reconnect - Awaitility.await().until(() -> { - try { - pulsarClient.newTransaction() - .withTransactionTimeout(200, TimeUnit.MILLISECONDS).build().get(); - } catch (Exception e) { - return false; - } - return true; - }); - } -} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionConsumeTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionConsumeTest.java index 
82fd1d1f9eb5e..d381487a82bb3 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionConsumeTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionConsumeTest.java @@ -36,12 +36,16 @@ import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.common.api.proto.MessageIdData; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.api.proto.TxnAction; +import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.ClusterDataImpl; import org.apache.pulsar.common.policies.data.TenantInfoImpl; @@ -75,6 +79,11 @@ public void setup() throws Exception { new TenantInfoImpl(Sets.newHashSet(), Sets.newHashSet(CLUSTER_NAME))); admin.namespaces().createNamespace("public/txn", 10); admin.topics().createNonPartitionedTopic(CONSUME_TOPIC); + + admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), + new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); + admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); + admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), 1); } @AfterMethod(alwaysRun = true) @@ -229,7 +238,12 @@ private void sendNormalMessages(Producer producer, int startMsgCnt, private List appendTransactionMessages( TxnID txnID, PersistentTopic topic, int transactionMsgCnt, List sendMessageList) - throws ExecutionException, InterruptedException { + throws 
ExecutionException, InterruptedException, PulsarClientException { + //Change the state of TB to Ready. + @Cleanup + Producer producer = PulsarClient.builder().serviceUrl(pulsarServiceList.get(0).getBrokerServiceUrl()) + .enableTransaction(true).build() + .newProducer(Schema.STRING).topic(CONSUME_TOPIC).sendTimeout(0, TimeUnit.SECONDS).create(); List positionList = new ArrayList<>(); for (int i = 0; i < transactionMsgCnt; i++) { final int j = i; @@ -239,7 +253,6 @@ private List appendTransactionMessages( .setTxnidMostBits(txnID.getMostSigBits()) .setTxnidLeastBits(txnID.getLeastSigBits()) .setPublishTime(System.currentTimeMillis()); - String msg = TXN_MSG_CONTENT + i; sendMessageList.add(msg); ByteBuf headerAndPayload = Commands.serializeMetadataAndPayload( diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionProduceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionProduceTest.java index 0e63f533b58c0..316c2f9f9df05 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionProduceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionProduceTest.java @@ -19,7 +19,6 @@ package org.apache.pulsar.broker.transaction; import static java.nio.charset.StandardCharsets.UTF_8; -import com.google.common.collect.Sets; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.HashSet; @@ -50,10 +49,7 @@ import org.apache.pulsar.client.impl.transaction.TransactionImpl; import org.apache.pulsar.common.api.proto.MarkerType; import org.apache.pulsar.common.api.proto.MessageMetadata; -import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.protocol.Commands; import org.awaitility.Awaitility; import org.testng.Assert; @@ -69,9 
+65,6 @@ public class TransactionProduceTest extends TransactionTestBase { private static final int TOPIC_PARTITION = 3; - - private static final String TENANT = "tnx"; - private static final String NAMESPACE1 = TENANT + "/ns1"; private static final String PRODUCE_COMMIT_TOPIC = NAMESPACE1 + "/produce-commit"; private static final String PRODUCE_ABORT_TOPIC = NAMESPACE1 + "/produce-abort"; private static final String ACK_COMMIT_TOPIC = NAMESPACE1 + "/ack-commit"; @@ -79,37 +72,10 @@ public class TransactionProduceTest extends TransactionTestBase { private static final int NUM_PARTITIONS = 16; @BeforeMethod protected void setup() throws Exception { - setBrokerCount(1); - internalSetup(); - - String[] brokerServiceUrlArr = getPulsarServiceList().get(0).getBrokerServiceUrl().split(":"); - String webServicePort = brokerServiceUrlArr[brokerServiceUrlArr.length -1]; - admin.clusters().createCluster(CLUSTER_NAME, ClusterData.builder().serviceUrl("http://localhost:" + webServicePort).build()); - admin.tenants().createTenant(TENANT, - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NAMESPACE1); - admin.topics().createPartitionedTopic(PRODUCE_COMMIT_TOPIC, 3); - admin.topics().createPartitionedTopic(PRODUCE_ABORT_TOPIC, 3); - admin.topics().createPartitionedTopic(ACK_COMMIT_TOPIC, 3); - admin.topics().createPartitionedTopic(ACK_ABORT_TOPIC, 3); - - admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); - admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), NUM_PARTITIONS); - - if (pulsarClient != null) { - pulsarClient.shutdown(); - } - pulsarClient = PulsarClient.builder() - .serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()) - .statsInterval(0, TimeUnit.SECONDS) - 
.enableTransaction(true) - .build(); - - - // wait tc init success to ready state - waitForCoordinatorToBeAvailable(NUM_PARTITIONS); + setUpBase(1, NUM_PARTITIONS, PRODUCE_COMMIT_TOPIC, TOPIC_PARTITION); + admin.topics().createPartitionedTopic(PRODUCE_ABORT_TOPIC, TOPIC_PARTITION); + admin.topics().createPartitionedTopic(ACK_COMMIT_TOPIC, TOPIC_PARTITION); + admin.topics().createPartitionedTopic(ACK_ABORT_TOPIC, TOPIC_PARTITION); } @AfterMethod(alwaysRun = true) @@ -123,6 +89,35 @@ public void produceAndCommitTest() throws Exception { produceTest(true); } + @Test + public void testDeleteNamespaceBeforeCommit() throws Exception { + final String topic = "persistent://" + NAMESPACE3 + "/testDeleteTopicBeforeCommit"; + PulsarClient pulsarClient = this.pulsarClient; + Transaction tnx = pulsarClient.newTransaction() + .withTransactionTimeout(60, TimeUnit.SECONDS) + .build().get(); + long txnIdMostBits = ((TransactionImpl) tnx).getTxnIdMostBits(); + long txnIdLeastBits = ((TransactionImpl) tnx).getTxnIdLeastBits(); + Assert.assertTrue(txnIdMostBits > -1); + Assert.assertTrue(txnIdLeastBits > -1); + + @Cleanup + Producer outProducer = pulsarClient + .newProducer() + .topic(topic) + .sendTimeout(0, TimeUnit.SECONDS) + .enableBatching(false) + .create(); + + String content = "Hello Txn"; + outProducer.newMessage(tnx).value(content.getBytes(UTF_8)).send(); + + try { + admin.namespaces().deleteNamespace(NAMESPACE3, true); + } catch (Exception ignore) { } + tnx.commit().get(); + } + @Test public void produceAndAbortTest() throws Exception { produceTest(false); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java index 971f8b24bce8b..72181ee53b711 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTest.java @@ -18,26 
+18,83 @@ */ package org.apache.pulsar.broker.transaction; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.pulsar.transaction.coordinator.impl.MLTransactionLogImpl.TRANSACTION_LOG_PREFIX; -import com.google.common.collect.Sets; +import static org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckStore.PENDING_ACK_STORE_SUFFIX; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + +import io.netty.buffer.Unpooled; +import io.netty.util.Timeout; import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.List; import java.util.Optional; +import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.common.util.Bytes; +import org.apache.bookkeeper.mledger.AsyncCallbacks; +import org.apache.bookkeeper.mledger.ManagedCursor; +import 
org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.ManagedLedgerFactory; +import org.apache.bookkeeper.mledger.impl.ManagedCursorContainer; +import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.bookkeeper.mledger.impl.ManagedLedgerFactoryImpl; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.intercept.BrokerInterceptor; +import org.apache.pulsar.broker.intercept.CounterBrokerInterceptor; +import org.apache.pulsar.broker.resources.NamespaceResources; +import org.apache.pulsar.broker.resources.PulsarResources; +import org.apache.pulsar.broker.service.BacklogQuotaManager; +import org.apache.pulsar.broker.service.BrokerService; +import org.apache.pulsar.broker.service.BrokerServiceException; import org.apache.pulsar.broker.service.Topic; +import org.apache.pulsar.broker.service.TransactionBufferSnapshotService; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.broker.systopic.NamespaceEventsSystemTopicFactory; +import org.apache.pulsar.broker.systopic.SystemTopicClient; +import org.apache.pulsar.broker.transaction.buffer.TransactionBuffer; +import org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBuffer; +import org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBufferProvider; +import org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBufferRecoverCallBack; +import org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBufferState; import org.apache.pulsar.broker.transaction.pendingack.PendingAckStore; import org.apache.pulsar.broker.transaction.buffer.matadata.TransactionBufferSnapshot; +import 
org.apache.pulsar.broker.transaction.pendingack.TransactionPendingAckStoreProvider; +import org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckReplyCallBack; import org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckStore; import org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckStoreProvider; +import org.apache.pulsar.broker.transaction.pendingack.impl.PendingAckHandleImpl; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; @@ -51,16 +108,33 @@ import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.api.transaction.Transaction; import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.client.impl.ClientCnx; +import org.apache.pulsar.client.impl.MessageIdImpl; +import org.apache.pulsar.client.util.ExecutorProvider; +import org.apache.pulsar.common.api.proto.CommandSubscribe; +import org.apache.pulsar.client.impl.transaction.TransactionImpl; +import org.apache.pulsar.common.events.EventType; import org.apache.pulsar.common.events.EventsTopicNames; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.ManagedLedgerInternalStats; import org.apache.pulsar.common.policies.data.RetentionPolicies; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicPolicies; +import org.apache.pulsar.common.schema.SchemaInfo; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; +import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; +import org.apache.pulsar.transaction.coordinator.TransactionMetadataStore; +import org.apache.pulsar.transaction.coordinator.TransactionMetadataStoreState; 
+import org.apache.pulsar.transaction.coordinator.TransactionRecoverTracker; +import org.apache.pulsar.transaction.coordinator.TransactionTimeoutTracker; +import org.apache.pulsar.transaction.coordinator.impl.MLTransactionLogImpl; +import org.apache.pulsar.transaction.coordinator.impl.MLTransactionSequenceIdGenerator; +import org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStore; import org.awaitility.Awaitility; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.powermock.reflect.Whitebox; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -73,36 +147,69 @@ @Test(groups = "broker") public class TransactionTest extends TransactionTestBase { - private static final String TENANT = "tnx"; - private static final String NAMESPACE1 = TENANT + "/ns1"; private static final int NUM_BROKERS = 1; private static final int NUM_PARTITIONS = 1; @BeforeMethod protected void setup() throws Exception { - this.setBrokerCount(NUM_BROKERS); - this.internalSetup(); - - String[] brokerServiceUrlArr = getPulsarServiceList().get(0).getBrokerServiceUrl().split(":"); - String webServicePort = brokerServiceUrlArr[brokerServiceUrlArr.length - 1]; - admin.clusters().createCluster(CLUSTER_NAME, ClusterData.builder() - .serviceUrl("http://localhost:" + webServicePort).build()); - admin.tenants().createTenant(TENANT, - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NAMESPACE1); - - admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); - admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), NUM_PARTITIONS); - pulsarClient.close(); - pulsarClient = PulsarClient.builder() - 
.serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()) - .statsInterval(0, TimeUnit.SECONDS) - .enableTransaction(true) - .build(); - // wait tc init success to ready state - waitForCoordinatorToBeAvailable(NUM_PARTITIONS); + setUpBase(NUM_BROKERS, NUM_PARTITIONS, NAMESPACE1 + "/test", 0); + } + + @Test + public void testCreateTransactionSystemTopic() throws Exception { + String subName = "test"; + String topicName = TopicName.get(NAMESPACE1 + "/" + "testCreateTransactionSystemTopic").toString(); + + try { + // init pending ack + @Cleanup + Consumer consumer = getConsumer(topicName, subName); + Transaction transaction = pulsarClient.newTransaction() + .withTransactionTimeout(10, TimeUnit.SECONDS).build().get(); + + consumer.acknowledgeAsync(new MessageIdImpl(10, 10, 10), transaction).get(); + } catch (ExecutionException e) { + assertTrue(e.getCause() instanceof PulsarClientException.TransactionConflictException); + } + topicName = MLPendingAckStore.getTransactionPendingAckStoreSuffix(topicName, subName); + + // getList does not include transaction system topic + List list = admin.topics().getList(NAMESPACE1); + assertEquals(list.size(), 4); + list.forEach(topic -> assertFalse(topic.contains(PENDING_ACK_STORE_SUFFIX))); + + try { + // can't create transaction system topic + @Cleanup + Consumer consumer = getConsumer(topicName, subName); + fail(); + } catch (PulsarClientException.NotAllowedException e) { + assertTrue(e.getMessage().contains("Can not create transaction system topic")); + } + + // can't create transaction system topic + try { + admin.topics().getSubscriptions(topicName); + fail(); + } catch (PulsarAdminException e) { + assertEquals(e.getMessage(), "Can not create transaction system topic " + topicName); + } + + // can't create transaction system topic + try { + admin.topics().createPartitionedTopic(topicName, 3); + fail(); + } catch (PulsarAdminException e) { + assertEquals(e.getMessage(), "Cannot create topic in system topic format!"); + } + 
+ // can't create transaction system topic + try { + admin.topics().createNonPartitionedTopic(topicName); + fail(); + } catch (PulsarAdminException e) { + assertEquals(e.getMessage(), "Cannot create topic in system topic format!"); + } } @Test @@ -295,4 +402,871 @@ public void testTakeSnapshotBeforeBuildTxnProducer() throws Exception { Assert.assertEquals(snapshot1.getMaxReadPositionEntryId(), 1); }); } -} \ No newline at end of file + + + @Test + public void testAppendBufferWithNotManageLedgerExceptionCanCastToMLE() + throws Exception { + String topic = "persistent://pulsar/system/testReCreateTopic"; + admin.topics().createNonPartitionedTopic(topic); + + PersistentTopic persistentTopic = + (PersistentTopic) pulsarServiceList.get(0).getBrokerService() + .getTopic(topic, false) + .get().get(); + CountDownLatch countDownLatch = new CountDownLatch(1); + Topic.PublishContext publishContext = new Topic.PublishContext() { + + @Override + public String getProducerName() { + return "test"; + } + + public long getSequenceId() { + return 30; + } + /** + * Return the producer name for the original producer. + * + * For messages published locally, this will return the same local producer name, though in case of + * replicated messages, the original producer name will differ + */ + public String getOriginalProducerName() { + return "test"; + } + + public long getOriginalSequenceId() { + return 30; + } + + public long getHighestSequenceId() { + return 30; + } + + public long getOriginalHighestSequenceId() { + return 30; + } + + public long getNumberOfMessages() { + return 30; + } + + @Override + public void completed(Exception e, long ledgerId, long entryId) { + Assert.assertTrue(e.getCause() instanceof ManagedLedgerException.ManagedLedgerAlreadyClosedException); + countDownLatch.countDown(); + } + }; + + //Close topic manageLedger. + persistentTopic.getManagedLedger().close(); + + //Publish to a closed managerLedger to test ManagerLedgerException. 
+ persistentTopic.publishTxnMessage(new TxnID(123L, 321L), + Unpooled.copiedBuffer("message", UTF_8), publishContext); + + //If it times out, it means that the assertTrue in publishContext.completed is failed. + Awaitility.await().until(() -> { + countDownLatch.await(); + return true; + }); + } + + @Test + public void testMaxReadPositionForNormalPublish() throws Exception { + String topic = "persistent://" + NAMESPACE1 + "/NormalPublish"; + admin.topics().createNonPartitionedTopic(topic); + PersistentTopic persistentTopic = (PersistentTopic) getPulsarServiceList().get(0).getBrokerService() + .getTopic(topic, false).get().get(); + + TopicTransactionBuffer topicTransactionBuffer = (TopicTransactionBuffer) persistentTopic.getTransactionBuffer(); + PulsarClient noTxnClient = PulsarClient.builder().enableTransaction(false) + .serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()).build(); + + //test the state of TransactionBuffer is NoSnapshot + //before build Producer by pulsarClient that enables transaction. + Producer normalProducer = noTxnClient.newProducer(Schema.STRING) + .producerName("testNormalPublish") + .topic(topic) + .sendTimeout(0, TimeUnit.SECONDS) + .create(); + Awaitility.await().untilAsserted(() -> Assert.assertTrue(topicTransactionBuffer.checkIfNoSnapshot())); + + //test publishing normal messages will change maxReadPosition in the state of NoSnapshot. + MessageIdImpl messageId = (MessageIdImpl) normalProducer.newMessage().value("normal message").send(); + PositionImpl position = topicTransactionBuffer.getMaxReadPosition(); + Assert.assertEquals(position.getLedgerId(), messageId.getLedgerId()); + Assert.assertEquals(position.getEntryId(), messageId.getEntryId()); + + //test the state of TransactionBuffer is Ready after build Producer by pulsarClient that enables transaction. 
+ Producer txnProducer = pulsarClient.newProducer(Schema.STRING) + .producerName("testTransactionPublish") + .topic(topic) + .sendTimeout(0, TimeUnit.SECONDS) + .create(); + + Awaitility.await().untilAsserted(() -> Assert.assertTrue(topicTransactionBuffer.checkIfReady())); + //test publishing txn messages will not change maxReadPosition if don`t commit or abort. + Transaction transaction = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS).build().get(); + MessageIdImpl messageId1 = (MessageIdImpl) txnProducer.newMessage(transaction).value("txn message").send(); + PositionImpl position1 = topicTransactionBuffer.getMaxReadPosition(); + Assert.assertEquals(position1.getLedgerId(), messageId.getLedgerId()); + Assert.assertEquals(position1.getEntryId(), messageId.getEntryId()); + + MessageIdImpl messageId2 = (MessageIdImpl) normalProducer.newMessage().value("normal message").send(); + PositionImpl position2 = topicTransactionBuffer.getMaxReadPosition(); + Assert.assertEquals(position2.getLedgerId(), messageId.getLedgerId()); + Assert.assertEquals(position2.getEntryId(), messageId.getEntryId()); + transaction.commit().get(); + PositionImpl position3 = topicTransactionBuffer.getMaxReadPosition(); + + Assert.assertEquals(position3.getLedgerId(), messageId2.getLedgerId()); + Assert.assertEquals(position3.getEntryId(), messageId2.getEntryId() + 1); + + //test publishing normal messages will change maxReadPosition if the state of TB + //is Ready and ongoingTxns is empty. + MessageIdImpl messageId4 = (MessageIdImpl) normalProducer.newMessage().value("normal message").send(); + PositionImpl position4 = topicTransactionBuffer.getMaxReadPosition(); + Assert.assertEquals(position4.getLedgerId(), messageId4.getLedgerId()); + Assert.assertEquals(position4.getEntryId(), messageId4.getEntryId()); + + //test publishing normal messages will not change maxReadPosition if the state o TB is Initializing. 
+ Class transactionBufferStateClass = + (Class) topicTransactionBuffer.getClass().getSuperclass(); + Field field = transactionBufferStateClass.getDeclaredField("state"); + field.setAccessible(true); + Class topicTransactionBufferClass = TopicTransactionBuffer.class; + Field maxReadPositionField = topicTransactionBufferClass.getDeclaredField("maxReadPosition"); + maxReadPositionField.setAccessible(true); + field.set(topicTransactionBuffer, TopicTransactionBufferState.State.Initializing); + MessageIdImpl messageId5 = (MessageIdImpl) normalProducer.newMessage().value("normal message").send(); + PositionImpl position5 = (PositionImpl) maxReadPositionField.get(topicTransactionBuffer); + Assert.assertEquals(position5.getLedgerId(), messageId4.getLedgerId()); + Assert.assertEquals(position5.getEntryId(), messageId4.getEntryId()); + } + + @Test + public void testEndTBRecoveringWhenManagerLedgerDisReadable() throws Exception{ + String topic = NAMESPACE1 + "/testEndTBRecoveringWhenManagerLedgerDisReadable"; + admin.topics().createNonPartitionedTopic(topic); + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .producerName("test") + .enableBatching(false) + .sendTimeout(0, TimeUnit.SECONDS) + .topic(topic) + .create(); + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(10, TimeUnit.SECONDS).build().get(); + + producer.newMessage(txn).value("test").send(); + + PersistentTopic persistentTopic = (PersistentTopic) getPulsarServiceList().get(0).getBrokerService() + .getTopic("persistent://" + topic, false).get().get(); + persistentTopic.getManagedLedger().getConfig().setAutoSkipNonRecoverableData(true); + + ManagedCursorImpl managedCursor = mock(ManagedCursorImpl.class); + doReturn("transaction-buffer-sub").when(managedCursor).getName(); + doReturn(true).when(managedCursor).hasMoreEntries(); + doAnswer(invocation -> { + AsyncCallbacks.ReadEntriesCallback callback = invocation.getArgument(1); + callback.readEntriesFailed(new 
ManagedLedgerException.NonRecoverableLedgerException("No ledger exist"), + null); + return null; + }).when(managedCursor).asyncReadEntries(anyInt(), any(), any(), any()); + Class managedLedgerClass = ManagedLedgerImpl.class; + Field field = managedLedgerClass.getDeclaredField("cursors"); + field.setAccessible(true); + ManagedCursorContainer managedCursors = (ManagedCursorContainer) field.get(persistentTopic.getManagedLedger()); + managedCursors.removeCursor("transaction-buffer-sub"); + managedCursors.add(managedCursor); + + TransactionBuffer buffer1 = new TopicTransactionBuffer(persistentTopic); + Awaitility.await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> + assertEquals(buffer1.getStats().state, "Ready")); + + doAnswer(invocation -> { + AsyncCallbacks.ReadEntriesCallback callback = invocation.getArgument(1); + callback.readEntriesFailed(new ManagedLedgerException.ManagedLedgerFencedException(), null); + return null; + }).when(managedCursor).asyncReadEntries(anyInt(), any(), any(), any()); + + TransactionBuffer buffer2 = new TopicTransactionBuffer(persistentTopic); + Awaitility.await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> + assertEquals(buffer2.getStats().state, "Ready")); + managedCursors.removeCursor("transaction-buffer-sub"); + + doAnswer(invocation -> { + AsyncCallbacks.ReadEntriesCallback callback = invocation.getArgument(1); + callback.readEntriesFailed(new ManagedLedgerException.CursorAlreadyClosedException("test"), null); + return null; + }).when(managedCursor).asyncReadEntries(anyInt(), any(), any(), any()); + + managedCursors.add(managedCursor); + TransactionBuffer buffer3 = new TopicTransactionBuffer(persistentTopic); + Awaitility.await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> + assertEquals(buffer3.getStats().state, "Ready")); + persistentTopic.getInternalStats(false).thenAccept(internalStats -> { + assertTrue(internalStats.cursors.isEmpty()); + }); + managedCursors.removeCursor("transaction-buffer-sub"); + } + + @Test + 
public void testEndTPRecoveringWhenManagerLedgerDisReadable() throws Exception{ + String topic = NAMESPACE1 + "/testEndTPRecoveringWhenManagerLedgerDisReadable"; + admin.topics().createNonPartitionedTopic(topic); + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .producerName("test") + .enableBatching(false) + .sendTimeout(0, TimeUnit.SECONDS) + .topic(topic) + .create(); + producer.newMessage().send(); + + PersistentTopic persistentTopic = (PersistentTopic) getPulsarServiceList().get(0).getBrokerService() + .getTopic(topic, false).get().get(); + persistentTopic.getManagedLedger().getConfig().setAutoSkipNonRecoverableData(true); + PersistentSubscription persistentSubscription = (PersistentSubscription) persistentTopic + .createSubscription("test", + CommandSubscribe.InitialPosition.Earliest, false).get(); + + ManagedCursorImpl managedCursor = mock(ManagedCursorImpl.class); + doReturn(true).when(managedCursor).hasMoreEntries(); + doReturn(false).when(managedCursor).isClosed(); + doReturn(new PositionImpl(-1, -1)).when(managedCursor).getMarkDeletedPosition(); + doAnswer(invocation -> { + AsyncCallbacks.ReadEntriesCallback callback = invocation.getArgument(1); + callback.readEntriesFailed(new ManagedLedgerException.NonRecoverableLedgerException("No ledger exist"), + null); + return null; + }).when(managedCursor).asyncReadEntries(anyInt(), any(), any(), any()); + + TransactionPendingAckStoreProvider pendingAckStoreProvider = mock(TransactionPendingAckStoreProvider.class); + doReturn(CompletableFuture.completedFuture( + new MLPendingAckStore(persistentTopic.getManagedLedger(), managedCursor, null, 500))) + .when(pendingAckStoreProvider).newPendingAckStore(any()); + doReturn(CompletableFuture.completedFuture(true)).when(pendingAckStoreProvider).checkInitializedBefore(any()); + + Class pulsarServiceClass = PulsarService.class; + Field field = pulsarServiceClass.getDeclaredField("transactionPendingAckStoreProvider"); + field.setAccessible(true); + 
field.set(getPulsarServiceList().get(0), pendingAckStoreProvider); + + PendingAckHandleImpl pendingAckHandle1 = new PendingAckHandleImpl(persistentSubscription); + Awaitility.await().untilAsserted(() -> + assertEquals(pendingAckHandle1.getStats().state, "Ready")); + + doAnswer(invocation -> { + AsyncCallbacks.ReadEntriesCallback callback = invocation.getArgument(1); + callback.readEntriesFailed(new ManagedLedgerException.ManagedLedgerFencedException(), null); + return null; + }).when(managedCursor).asyncReadEntries(anyInt(), any(), any(), any()); + + PendingAckHandleImpl pendingAckHandle2 = new PendingAckHandleImpl(persistentSubscription); + Awaitility.await().untilAsserted(() -> + assertEquals(pendingAckHandle2.getStats().state, "Ready")); + + doAnswer(invocation -> { + AsyncCallbacks.ReadEntriesCallback callback = invocation.getArgument(1); + callback.readEntriesFailed(new ManagedLedgerException.CursorAlreadyClosedException("test"), null); + return null; + }).when(managedCursor).asyncReadEntries(anyInt(), any(), any(), any()); + + PendingAckHandleImpl pendingAckHandle3 = new PendingAckHandleImpl(persistentSubscription); + + Awaitility.await().untilAsserted(() -> + assertEquals(pendingAckHandle3.getStats().state, "Ready")); + } + + @Test + public void testEndTCRecoveringWhenManagerLedgerDisReadable() throws Exception{ + String topic = NAMESPACE1 + "/testEndTCRecoveringWhenManagerLedgerDisReadable"; + admin.topics().createNonPartitionedTopic(topic); + + PersistentTopic persistentTopic = (PersistentTopic) getPulsarServiceList().get(0).getBrokerService() + .getTopic(topic, false).get().get(); + persistentTopic.getManagedLedger().getConfig().setAutoSkipNonRecoverableData(true); + Map map = new HashMap<>(); + map.put(MLTransactionSequenceIdGenerator.MAX_LOCAL_TXN_ID, "1"); + persistentTopic.getManagedLedger().setProperties(map); + + ManagedCursor managedCursor = mock(ManagedCursor.class); + doReturn(true).when(managedCursor).hasMoreEntries(); + doAnswer(invocation -> { 
+ AsyncCallbacks.ReadEntriesCallback callback = invocation.getArgument(1); + callback.readEntriesFailed(new ManagedLedgerException.NonRecoverableLedgerException("No ledger exist"), + null); + return null; + }).when(managedCursor).asyncReadEntries(anyInt(), any(), any(), any()); + MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); + persistentTopic.getManagedLedger().getConfig().setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); + MLTransactionLogImpl mlTransactionLog = + new MLTransactionLogImpl(new TransactionCoordinatorID(1), null, + persistentTopic.getManagedLedger().getConfig()); + Class mlTransactionLogClass = MLTransactionLogImpl.class; + Field field = mlTransactionLogClass.getDeclaredField("cursor"); + field.setAccessible(true); + field.set(mlTransactionLog, managedCursor); + field = mlTransactionLogClass.getDeclaredField("managedLedger"); + field.setAccessible(true); + field.set(mlTransactionLog, persistentTopic.getManagedLedger()); + + TransactionRecoverTracker transactionRecoverTracker = mock(TransactionRecoverTracker.class); + doNothing().when(transactionRecoverTracker).appendOpenTransactionToTimeoutTracker(); + doNothing().when(transactionRecoverTracker).handleCommittingAndAbortingTransaction(); + TransactionTimeoutTracker timeoutTracker = mock(TransactionTimeoutTracker.class); + doNothing().when(timeoutTracker).start(); + MLTransactionMetadataStore metadataStore1 = + new MLTransactionMetadataStore(new TransactionCoordinatorID(1), + mlTransactionLog, timeoutTracker, mlTransactionSequenceIdGenerator); + metadataStore1.init(transactionRecoverTracker).get(); + Awaitility.await().untilAsserted(() -> + assertEquals(metadataStore1.getCoordinatorStats().state, "Ready")); + + doAnswer(invocation -> { + AsyncCallbacks.ReadEntriesCallback callback = invocation.getArgument(1); + callback.readEntriesFailed(new ManagedLedgerException.ManagedLedgerFencedException(), null); + return null; + 
}).when(managedCursor).asyncReadEntries(anyInt(), any(), any(), any()); + + MLTransactionMetadataStore metadataStore2 = + new MLTransactionMetadataStore(new TransactionCoordinatorID(1), + mlTransactionLog, timeoutTracker, mlTransactionSequenceIdGenerator); + metadataStore2.init(transactionRecoverTracker).get(); + Awaitility.await().untilAsserted(() -> + assertEquals(metadataStore2.getCoordinatorStats().state, "Ready")); + + doAnswer(invocation -> { + AsyncCallbacks.ReadEntriesCallback callback = invocation.getArgument(1); + callback.readEntriesFailed(new ManagedLedgerException.CursorAlreadyClosedException("test"), null); + return null; + }).when(managedCursor).asyncReadEntries(anyInt(), any(), any(), any()); + + MLTransactionMetadataStore metadataStore3 = + new MLTransactionMetadataStore(new TransactionCoordinatorID(1), + mlTransactionLog, timeoutTracker, mlTransactionSequenceIdGenerator); + metadataStore3.init(transactionRecoverTracker).get(); + Awaitility.await().untilAsserted(() -> + assertEquals(metadataStore3.getCoordinatorStats().state, "Ready")); + } + + @Test + public void testEndTxnWhenCommittingOrAborting() throws Exception { + Transaction commitTxn = pulsarClient + .newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + Transaction abortTxn = pulsarClient + .newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + + Class transactionClass = TransactionImpl.class; + Field field = transactionClass.getDeclaredField("state"); + field.setAccessible(true); + + field.set(commitTxn, TransactionImpl.State.COMMITTING); + field.set(abortTxn, TransactionImpl.State.ABORTING); + + abortTxn.abort(); + commitTxn.commit(); + } + + @Test + public void testNoEntryCanBeReadWhenRecovery() throws Exception { + String topic = NAMESPACE1 + "/test"; + PersistentTopic persistentTopic = + (PersistentTopic) pulsarServiceList.get(0).getBrokerService() + .getTopic(TopicName.get(topic).toString(), true) + .get() + .get(); + 
+ Class persistentTopicClass = PersistentTopic.class; + Field filed1 = persistentTopicClass.getDeclaredField("ledger"); + Field field2 = persistentTopicClass.getDeclaredField("transactionBuffer"); + filed1.setAccessible(true); + field2.setAccessible(true); + ManagedLedgerImpl managedLedger = (ManagedLedgerImpl) spy(filed1.get(persistentTopic)); + filed1.set(persistentTopic, managedLedger); + + TopicTransactionBuffer topicTransactionBuffer = (TopicTransactionBuffer) field2.get(persistentTopic); + Method method = TopicTransactionBuffer.class.getDeclaredMethod("takeSnapshot"); + method.setAccessible(true); + CompletableFuture completableFuture = (CompletableFuture) method.invoke(topicTransactionBuffer); + completableFuture.get(); + + doReturn(PositionImpl.latest).when(managedLedger).getLastConfirmedEntry(); + ManagedCursorImpl managedCursor = mock(ManagedCursorImpl.class); + doReturn(false).when(managedCursor).hasMoreEntries(); + doReturn(managedCursor).when(managedLedger).newNonDurableCursor(any(), any()); + + TopicTransactionBuffer transactionBuffer = new TopicTransactionBuffer(persistentTopic); + Awaitility.await().untilAsserted(() -> Assert.assertTrue(transactionBuffer.checkIfReady())); + } + + @Test + public void testRetryExceptionOfEndTxn() throws Exception{ + Transaction transaction = pulsarClient.newTransaction() + .withTransactionTimeout(10, TimeUnit.SECONDS) + .build() + .get(); + Class transactionMetadataStoreStateClass = TransactionMetadataStoreState.class; + getPulsarServiceList().get(0).getTransactionMetadataStoreService().getStores() + .values() + .forEach((transactionMetadataStore -> { + try { + Field field = transactionMetadataStoreStateClass.getDeclaredField("state"); + field.setAccessible(true); + field.set(transactionMetadataStore, TransactionMetadataStoreState.State.Initializing); + } catch (Exception e) { + e.printStackTrace(); + } + })); + CompletableFuture completableFuture = transaction.commit(); + try { + completableFuture.get(5, 
TimeUnit.SECONDS); + fail(); + } catch (TimeoutException ignored) { + } + getPulsarServiceList().get(0).getTransactionMetadataStoreService().getStores() + .values() + .stream() + .forEach((transactionMetadataStore -> { + try { + Field field = transactionMetadataStoreStateClass.getDeclaredField("state"); + field.setAccessible(true); + field.set(transactionMetadataStore, TransactionMetadataStoreState.State.Ready); + } catch (Exception e) { + e.printStackTrace(); + } + })); + completableFuture.get(5, TimeUnit.SECONDS); + } + + @Test + public void testCancelTxnTimeout() throws Exception{ + Transaction transaction = pulsarClient.newTransaction() + .withTransactionTimeout(10, TimeUnit.SECONDS) + .build() + .get(); + + transaction.commit().get(); + + Field field = TransactionImpl.class.getDeclaredField("timeout"); + field.setAccessible(true); + Timeout timeout = (Timeout) field.get(transaction); + Assert.assertTrue(timeout.isCancelled()); + + transaction = pulsarClient.newTransaction() + .withTransactionTimeout(10, TimeUnit.SECONDS) + .build() + .get(); + + transaction.abort().get(); + timeout = (Timeout) field.get(transaction); + Assert.assertTrue(timeout.isCancelled()); + } + + @Test + public void testNotChangeMaxReadPositionAndAddAbortTimesWhenCheckIfNoSnapshot() throws Exception { + PersistentTopic persistentTopic = (PersistentTopic) getPulsarServiceList().get(0) + .getBrokerService() + .getTopic(NAMESPACE1 + "/changeMaxReadPositionAndAddAbortTimes" + UUID.randomUUID(), true) + .get().get(); + TransactionBuffer buffer = persistentTopic.getTransactionBuffer(); + Field field = TopicTransactionBuffer.class.getDeclaredField("changeMaxReadPositionAndAddAbortTimes"); + field.setAccessible(true); + AtomicLong changeMaxReadPositionAndAddAbortTimes = (AtomicLong) field.get(buffer); + Field field1 = TopicTransactionBufferState.class.getDeclaredField("state"); + field1.setAccessible(true); + + Awaitility.await().untilAsserted(() -> { + TopicTransactionBufferState.State state = 
(TopicTransactionBufferState.State) field1.get(buffer); + Assert.assertEquals(state, TopicTransactionBufferState.State.NoSnapshot); + }); + Assert.assertEquals(changeMaxReadPositionAndAddAbortTimes.get(), 0L); + + buffer.syncMaxReadPositionForNormalPublish(new PositionImpl(1, 1)); + Assert.assertEquals(changeMaxReadPositionAndAddAbortTimes.get(), 0L); + + } + + @Test + public void testAutoCreateSchemaForTransactionSnapshot() throws Exception { + String namespace = TENANT + "/ns2"; + String topic = namespace + "/test"; + pulsarServiceList.forEach((pulsarService -> + pulsarService.getConfiguration().setAllowAutoUpdateSchemaEnabled(false))); + admin.namespaces().createNamespace(namespace); + admin.topics().createNonPartitionedTopic(topic); + TopicName transactionBufferTopicName = + NamespaceEventsSystemTopicFactory.getSystemTopicName( + TopicName.get(topic).getNamespaceObject(), EventType.TRANSACTION_BUFFER_SNAPSHOT); + TopicName transactionBufferTopicName1 = + NamespaceEventsSystemTopicFactory.getSystemTopicName( + TopicName.get(topic).getNamespaceObject(), EventType.TOPIC_POLICY); + Awaitility.await().untilAsserted(() -> { + SchemaInfo schemaInfo = admin + .schemas() + .getSchemaInfo(transactionBufferTopicName.toString()); + Assert.assertNotNull(schemaInfo); + SchemaInfo schemaInfo1 = admin + .schemas() + .getSchemaInfo(transactionBufferTopicName1.toString()); + Assert.assertNotNull(schemaInfo1); + }); + pulsarServiceList.forEach((pulsarService -> + pulsarService.getConfiguration().setAllowAutoUpdateSchemaEnabled(true))); + } + + @Test + public void testPendingAckMarkDeletePosition() throws Exception { + getPulsarServiceList().get(0).getConfig().setTransactionPendingAckLogIndexMinLag(1); + getPulsarServiceList().get(0).getConfiguration().setManagedLedgerDefaultMarkDeleteRateLimit(5); + String topic = NAMESPACE1 + "/test1"; + + @Cleanup + Producer producer = pulsarClient + .newProducer(Schema.BYTES) + .topic(topic) + .sendTimeout(0, TimeUnit.SECONDS) + .create(); + + 
@Cleanup + Consumer consumer = pulsarClient + .newConsumer() + .topic(topic) + .subscriptionName("sub") + .subscribe(); + consumer.getSubscription(); + + PersistentSubscription persistentSubscription = (PersistentSubscription) getPulsarServiceList() + .get(0) + .getBrokerService() + .getTopic(topic, false) + .get() + .get() + .getSubscription("sub"); + + ManagedCursor subscriptionCursor = persistentSubscription.getCursor(); + + subscriptionCursor.getMarkDeletedPosition(); + //pendingAck add message1 and commit mark, metadata add message1 + //PersistentMarkDeletedPosition have not updated + producer.newMessage() + .value("test".getBytes(UTF_8)) + .send(); + Transaction transaction = pulsarClient + .newTransaction() + .withTransactionTimeout(5, TimeUnit.MINUTES) + .build().get(); + + Message message1 = consumer.receive(10, TimeUnit.SECONDS); + + consumer.acknowledgeAsync(message1.getMessageId(), transaction); + transaction.commit().get(); + //PersistentMarkDeletedPosition of subscription have updated to message1, + //check whether delete markDeletedPosition of pendingAck after append entry to pendingAck + transaction = pulsarClient + .newTransaction() + .withTransactionTimeout(5, TimeUnit.MINUTES) + .build().get(); + + producer.newMessage() + .value("test".getBytes(UTF_8)) + .send(); + Message message2 = consumer.receive(10, TimeUnit.SECONDS); + consumer.acknowledgeAsync(message2.getMessageId(), transaction); + + Awaitility.await().untilAsserted(() -> { + ManagedLedgerInternalStats managedLedgerInternalStats = admin + .transactions() + .getPendingAckInternalStats(topic, "sub", false) + .pendingAckLogStats + .managedLedgerInternalStats; + String [] markDeletePosition = managedLedgerInternalStats.cursors.get("__pending_ack_state") + .markDeletePosition.split(":"); + String [] lastConfirmedEntry = managedLedgerInternalStats.lastConfirmedEntry.split(":"); + Assert.assertEquals(markDeletePosition[0], lastConfirmedEntry[0]); + //don`t contain commit mark and unCommitted 
message2 + Assert.assertEquals(Integer.parseInt(markDeletePosition[1]), + Integer.parseInt(lastConfirmedEntry[1]) - 2); + }); + } + + @Test + public void testConsistencyOfTransactionStatsAtEndTxn() throws Exception { + TransactionMetadataStore transactionMetadataStore = getPulsarServiceList().get(0) + .getTransactionMetadataStoreService() + .getStores() + .get(new TransactionCoordinatorID(0)); + + Field field = MLTransactionMetadataStore.class.getDeclaredField("transactionLog"); + field.setAccessible(true); + MLTransactionLogImpl transactionLog = (MLTransactionLogImpl) field.get(transactionMetadataStore); + Field field1 = MLTransactionLogImpl.class.getDeclaredField("cursor"); + field1.setAccessible(true); + ManagedCursorImpl managedCursor = (ManagedCursorImpl) field1.get(transactionLog); + managedCursor.close(); + + Transaction transaction = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + + transaction.commit().get(); + } + + @Test + public void testGetConnectExceptionForAckMsgWhenCnxIsNull() throws Exception { + String topic = NAMESPACE1 + "/testGetConnectExceptionForAckMsgWhenCnxIsNull"; + @Cleanup + Producer producer = pulsarClient + .newProducer(Schema.BYTES) + .topic(topic) + .sendTimeout(0, TimeUnit.SECONDS) + .create(); + + @Cleanup + Consumer consumer = pulsarClient + .newConsumer() + .topic(topic) + .subscriptionName("sub") + .subscribe(); + + for (int i = 0; i < 10; i++) { + producer.newMessage().value(Bytes.toBytes(i)).send(); + } + ClientCnx cnx = Whitebox.invokeMethod(consumer, "cnx"); + Whitebox.invokeMethod(consumer, "connectionClosed", cnx); + + Message message = consumer.receive(); + Transaction transaction = pulsarClient + .newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build().get(); + + try { + consumer.acknowledgeAsync(message.getMessageId(), transaction).get(); + fail(); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof 
PulsarClientException.ConnectException); + } + } + + + @Test + public void testPendingAckBatchMessageCommit() throws Exception { + String topic = NAMESPACE1 + "/testPendingAckBatchMessageCommit"; + + // enable batch index ack + conf.setAcknowledgmentAtBatchIndexLevelEnabled(true); + + @Cleanup + Producer producer = pulsarClient + .newProducer(Schema.BYTES) + .topic(topic) + .enableBatching(true) + // ensure that batch message is sent + .batchingMaxPublishDelay(3, TimeUnit.SECONDS) + .sendTimeout(0, TimeUnit.SECONDS) + .create(); + + @Cleanup + Consumer consumer = pulsarClient + .newConsumer() + .subscriptionType(SubscriptionType.Shared) + .topic(topic) + .subscriptionName("sub") + .subscribe(); + + // send batch message, the size is 5 + for (int i = 0; i < 5; i++) { + producer.sendAsync(("test" + i).getBytes()); + } + + producer.flush(); + + Transaction txn1 = pulsarClient.newTransaction() + .withTransactionTimeout(10, TimeUnit.MINUTES).build().get(); + // ack the first message with transaction + consumer.acknowledgeAsync(consumer.receive().getMessageId(), txn1).get(); + Transaction txn2 = pulsarClient.newTransaction() + .withTransactionTimeout(10, TimeUnit.MINUTES).build().get(); + // ack the second message with transaction + MessageId messageId = consumer.receive().getMessageId(); + consumer.acknowledgeAsync(messageId, txn2).get(); + + // commit the txn1 + txn1.commit().get(); + // abort the txn2 + txn2.abort().get(); + + Transaction txn3 = pulsarClient.newTransaction() + .withTransactionTimeout(10, TimeUnit.MINUTES).build().get(); + // repeat ack the second message, can ack successful + consumer.acknowledgeAsync(messageId, txn3).get(); + } + + /** + * When change pending ack handle state failure, exceptionally complete cmd-subscribe. + * see: https://github.com/apache/pulsar/pull/16248. 
+ */ + @Test + public void testPendingAckReplayChangeStateError() throws InterruptedException, TimeoutException { + AtomicInteger atomicInteger = new AtomicInteger(1); + // Create Executor + ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor(); + // Mock serviceConfiguration. + ServiceConfiguration serviceConfiguration = mock(ServiceConfiguration.class); + when(serviceConfiguration.isTransactionCoordinatorEnabled()).thenReturn(true); + // Mock executorProvider. + ExecutorProvider executorProvider = mock(ExecutorProvider.class); + when(executorProvider.getExecutor()).thenReturn(executorService); + when(executorProvider.getExecutor(any(Object.class))).thenReturn(executorService); + // Mock pendingAckStore. + PendingAckStore pendingAckStore = mock(PendingAckStore.class); + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + executorService.execute(()->{ + PendingAckHandleImpl pendingAckHandle = (PendingAckHandleImpl) invocation.getArguments()[0]; + pendingAckHandle.close(); + MLPendingAckReplyCallBack mlPendingAckReplyCallBack + = new MLPendingAckReplyCallBack(pendingAckHandle); + mlPendingAckReplyCallBack.replayComplete(); + }); + return null; + } + }).when(pendingAckStore).replayAsync(any(), any()); + // Mock executorProvider. + TransactionPendingAckStoreProvider pendingAckStoreProvider = mock(TransactionPendingAckStoreProvider.class); + when(pendingAckStoreProvider.checkInitializedBefore(any())) + .thenReturn(CompletableFuture.completedFuture(true)); + when(pendingAckStoreProvider.newPendingAckStore(any())) + .thenReturn(CompletableFuture.completedFuture(pendingAckStore)); + // Mock pulsar. 
+ PulsarService pulsar = mock(PulsarService.class); + when(pulsar.getConfig()).thenReturn(serviceConfiguration); + when(pulsar.getTransactionExecutorProvider()).thenReturn(executorProvider); + when(pulsar.getTransactionPendingAckStoreProvider()).thenReturn(pendingAckStoreProvider); + // Mock brokerService. + BrokerService brokerService = mock(BrokerService.class); + when(brokerService.getPulsar()).thenReturn(pulsar); + // Mock topic. + PersistentTopic topic = mock(PersistentTopic.class); + when(topic.getBrokerService()).thenReturn(brokerService); + when(topic.getName()).thenReturn("topic-a"); + // Mock cursor for subscription. + ManagedCursor cursor_subscription = mock(ManagedCursor.class); + doThrow(new RuntimeException("1")).when(cursor_subscription).updateLastActive(); + // Create subscription. + String subscriptionName = "sub-a"; + boolean replicated = false; + PersistentSubscription persistentSubscription = new PersistentSubscription(topic, subscriptionName, + cursor_subscription, replicated); + org.apache.pulsar.broker.service.Consumer consumer = mock(org.apache.pulsar.broker.service.Consumer.class); + try { + CompletableFuture addConsumerFuture = persistentSubscription.addConsumer(consumer); + addConsumerFuture.get(5, TimeUnit.SECONDS); + fail("Expect failure by PendingAckHandle closed, but success"); + } catch (ExecutionException executionException){ + Throwable t = executionException.getCause(); + Assert.assertTrue(t instanceof BrokerServiceException.ServiceUnitNotReadyException); + } + } + + /** + * When change TB state failure, exceptionally complete cmd-producer. + * see: https://github.com/apache/pulsar/pull/16248. 
+ */ + @Test + public void testTBRecoverChangeStateError() throws InterruptedException, TimeoutException { + final AtomicReference persistentTopic = new AtomicReference(); + AtomicInteger atomicInteger = new AtomicInteger(1); + // Create Executor + ScheduledExecutorService executorService_recover = mock(ScheduledExecutorService.class); + // Mock serviceConfiguration. + ServiceConfiguration serviceConfiguration = mock(ServiceConfiguration.class); + when(serviceConfiguration.isEnableReplicatedSubscriptions()).thenReturn(false); + when(serviceConfiguration.isTransactionCoordinatorEnabled()).thenReturn(true); + // Mock executorProvider. + ExecutorProvider executorProvider = mock(ExecutorProvider.class); + when(executorProvider.getExecutor(any(Object.class))).thenReturn(executorService_recover); + // Mock pendingAckStore. + PendingAckStore pendingAckStore = mock(PendingAckStore.class); + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + new Thread(() -> { + TopicTransactionBuffer.TopicTransactionBufferRecover recover + = (TopicTransactionBuffer.TopicTransactionBufferRecover)invocation.getArguments()[0]; + TopicTransactionBufferRecoverCallBack callBack + = Whitebox.getInternalState(recover, "callBack");; + try { + persistentTopic.get().getTransactionBuffer().closeAsync().get(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (ExecutionException e) { + throw new RuntimeException(e); + } + callBack.recoverComplete(); + }).start(); + return null; + } + }).when(executorService_recover).execute(any()); + // Mock executorProvider. 
+ TransactionPendingAckStoreProvider pendingAckStoreProvider = mock(TransactionPendingAckStoreProvider.class); + when(pendingAckStoreProvider.checkInitializedBefore(any())) + .thenReturn(CompletableFuture.completedFuture(true)); + when(pendingAckStoreProvider.newPendingAckStore(any())) + .thenReturn(CompletableFuture.completedFuture(pendingAckStore)); + // Mock TransactionBufferSnapshotService + TransactionBufferSnapshotService transactionBufferSnapshotService + = mock(TransactionBufferSnapshotService.class); + SystemTopicClient.Writer writer = mock(SystemTopicClient.Writer.class); + when(writer.closeAsync()).thenReturn(CompletableFuture.completedFuture(null)); + when(transactionBufferSnapshotService.createWriter(any())) + .thenReturn(CompletableFuture.completedFuture(writer)); + // Mock pulsar. + PulsarService pulsar = mock(PulsarService.class); + when(pulsar.getConfiguration()).thenReturn(serviceConfiguration); + when(pulsar.getConfig()).thenReturn(serviceConfiguration); + when(pulsar.getTransactionExecutorProvider()).thenReturn(executorProvider); + when(pulsar.getTransactionBufferSnapshotService()).thenReturn(transactionBufferSnapshotService); + PulsarResources pulsarResources = mock(PulsarResources.class); + when(pulsar.getPulsarResources()).thenReturn(pulsarResources); + NamespaceResources nsResources = mock(NamespaceResources.class); + when(pulsarResources.getNamespaceResources()).thenReturn(nsResources); + TopicTransactionBufferProvider topicTransactionBufferProvider = new TopicTransactionBufferProvider(); + when(pulsar.getTransactionBufferProvider()).thenReturn(topicTransactionBufferProvider); + // Mock BacklogQuotaManager + BacklogQuotaManager backlogQuotaManager = mock(BacklogQuotaManager.class); + // Mock brokerService. 
+ BrokerService brokerService = mock(BrokerService.class); + when(brokerService.getPulsar()).thenReturn(pulsar); + when(brokerService.pulsar()).thenReturn(pulsar); + when(brokerService.getBacklogQuotaManager()).thenReturn(backlogQuotaManager); + // Mock managedLedger. + ManagedLedgerImpl managedLedger = mock(ManagedLedgerImpl.class); + ManagedCursorContainer managedCursors = new ManagedCursorContainer(); + when(managedLedger.getCursors()).thenReturn(managedCursors); + PositionImpl position = PositionImpl.earliest; + when(managedLedger.getLastConfirmedEntry()).thenReturn(position); + // Create topic. + persistentTopic.set(new PersistentTopic("topic-a", managedLedger, brokerService)); + try { + // Do check. + persistentTopic.get().checkIfTransactionBufferRecoverCompletely(true).get(5, TimeUnit.SECONDS); + fail("Expect failure by TB closed, but it is finished."); + } catch (ExecutionException executionException){ + Throwable t = executionException.getCause(); + Assert.assertTrue(t instanceof BrokerServiceException.ServiceUnitNotReadyException); + } + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTestBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTestBase.java index 622421b16c22b..b1a82802c1f46 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTestBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/TransactionTestBase.java @@ -18,9 +18,11 @@ */ package org.apache.pulsar.broker.transaction; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; +import com.google.common.collect.Sets; import com.google.common.util.concurrent.MoreExecutors; import io.netty.channel.EventLoopGroup; import java.util.ArrayList; @@ -45,10 +47,10 @@ import 
org.apache.pulsar.broker.auth.SameThreadOrderedSafeExecutor; import org.apache.pulsar.broker.intercept.CounterBrokerInterceptor; import org.apache.pulsar.broker.namespace.NamespaceService; -import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; -import org.apache.pulsar.transaction.coordinator.TransactionMetadataStore; -import org.apache.pulsar.transaction.coordinator.TransactionMetadataStoreState; -import org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStore; +import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.metadata.impl.ZKMetadataStore; @@ -60,7 +62,6 @@ import org.apache.zookeeper.MockZooKeeper; import org.apache.zookeeper.MockZooKeeperSession; import org.apache.zookeeper.ZooKeeper; -import org.awaitility.Awaitility; @Slf4j public abstract class TransactionTestBase extends TestRetrySupport { @@ -83,6 +84,11 @@ public abstract class TransactionTestBase extends TestRetrySupport { private OrderedExecutor bkExecutor; private NonClosableMockBookKeeper mockBookKeeper; + public static final String TENANT = "tnx"; + protected static final String NAMESPACE1 = TENANT + "/ns1"; + protected static final String NAMESPACE3 = TENANT + "/ns3"; + protected ServiceConfiguration conf = new ServiceConfiguration(); + public void internalSetup() throws Exception { incrementSetupNumber(); init(); @@ -108,10 +114,42 @@ private void init() throws Exception { mockBookKeeper = createMockBookKeeper(bkExecutor); startBroker(); } + protected void setUpBase(int numBroker,int numPartitionsOfTC, String topic, int numPartitions) throws Exception{ + setBrokerCount(numBroker); + internalSetup(); + + String[] brokerServiceUrlArr = 
getPulsarServiceList().get(0).getBrokerServiceUrl().split(":"); + String webServicePort = brokerServiceUrlArr[brokerServiceUrlArr.length -1]; + admin.clusters().createCluster(CLUSTER_NAME, ClusterData.builder().serviceUrl("http://localhost:" + + webServicePort).build()); + + admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), + new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); + admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); + admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), numPartitionsOfTC); + if (topic != null) { + admin.tenants().createTenant(TENANT, + new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); + admin.namespaces().createNamespace(NAMESPACE1); + admin.namespaces().createNamespace(NAMESPACE3); + if (numPartitions == 0) { + admin.topics().createNonPartitionedTopic(topic); + } else { + admin.topics().createPartitionedTopic(topic, numPartitions); + } + } + if (pulsarClient != null) { + pulsarClient.shutdown(); + } + pulsarClient = PulsarClient.builder() + .serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()) + .statsInterval(0, TimeUnit.SECONDS) + .enableTransaction(true) + .build(); + } protected void startBroker() throws Exception { for (int i = 0; i < brokerCount; i++) { - ServiceConfiguration conf = new ServiceConfiguration(); conf.setClusterName(CLUSTER_NAME); conf.setAdvertisedAddress("localhost"); conf.setManagedLedgerCacheSizeMB(8); @@ -121,8 +159,7 @@ protected void startBroker() throws Exception { conf.setConfigurationStoreServers("localhost:3181"); conf.setAllowAutoTopicCreationType("non-partitioned"); conf.setBookkeeperClientExposeStatsToPrometheus(true); - conf.setAcknowledgmentAtBatchIndexLevelEnabled(true); - + conf.setForceDeleteNamespaceAllowed(true); conf.setBrokerShutdownTimeoutMs(0L); conf.setBrokerServicePort(Optional.of(0)); conf.setBrokerServicePortTls(Optional.of(0)); @@ 
-137,7 +174,7 @@ protected void startBroker() throws Exception { conf.setTopicLevelPoliciesEnabled(true); serviceConfigurationList.add(conf); - PulsarService pulsar = spy(new PulsarService(conf)); + PulsarService pulsar = spyWithClassAndConstructorArgs(PulsarService.class, conf); setupBrokerMocks(pulsar); pulsar.start(); @@ -153,7 +190,8 @@ protected void setupBrokerMocks(PulsarService pulsar) throws Exception { MockZooKeeperSession mockZooKeeperSession = MockZooKeeperSession.newInstance(mockZooKeeper); doReturn(new ZKMetadataStore(mockZooKeeperSession)).when(pulsar).createLocalMetadataStore(); doReturn(new ZKMetadataStore(mockZooKeeperSession)).when(pulsar).createConfigurationMetadataStore(); - Supplier namespaceServiceSupplier = () -> spy(new NamespaceService(pulsar)); + Supplier namespaceServiceSupplier = + () -> spyWithClassAndConstructorArgs(NamespaceService.class, pulsar); doReturn(namespaceServiceSupplier).when(pulsar).getNamespaceServiceProvider(); SameThreadOrderedSafeExecutor executor = new SameThreadOrderedSafeExecutor(); @@ -246,7 +284,7 @@ protected final void internalCleanup() { admin = null; } if (pulsarClient != null) { - pulsarClient.shutdown(); + pulsarClient.close(); pulsarClient = null; } if (pulsarServiceList.size() > 0) { @@ -293,22 +331,4 @@ protected final void internalCleanup() { log.warn("Failed to clean up mocked pulsar service:", e); } } - public void waitForCoordinatorToBeAvailable(int numOfTCPerBroker){ - // wait tc init success to ready state - Awaitility.await().until(() -> { - Map stores = - getPulsarServiceList().get(brokerCount-1).getTransactionMetadataStoreService().getStores(); - if (stores.size() == numOfTCPerBroker) { - for (TransactionCoordinatorID transactionCoordinatorID : stores.keySet()) { - if (((MLTransactionMetadataStore) stores.get(transactionCoordinatorID)).getState() - != TransactionMetadataStoreState.State.Ready) { - return false; - } - } - return true; - } else { - return false; - } - }); - } } diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/InMemTransactionBufferReaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/InMemTransactionBufferReaderTest.java index 513a78da429dc..d4234fbd16610 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/InMemTransactionBufferReaderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/InMemTransactionBufferReaderTest.java @@ -33,7 +33,7 @@ import java.util.TreeMap; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; -import org.apache.pulsar.broker.transaction.buffer.exceptions.EndOfTransactionException; +import org.apache.pulsar.broker.transaction.exception.buffer.TransactionBufferException; import org.apache.pulsar.broker.transaction.buffer.impl.InMemTransactionBufferReader; import org.apache.pulsar.client.api.transaction.TxnID; import org.testng.annotations.Test; @@ -116,7 +116,7 @@ public void testEndOfTransactionException() throws Exception { reader.readNext(1).get(); fail("should fail to read entries if there is no more in the transaction buffer"); } catch (ExecutionException ee) { - assertTrue(ee.getCause() instanceof EndOfTransactionException); + assertTrue(ee.getCause() instanceof TransactionBufferException.EndOfTransactionException); } } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TopicTransactionBufferTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TopicTransactionBufferTest.java new file mode 100644 index 0000000000000..576ef647248d4 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TopicTransactionBufferTest.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.transaction.buffer; + +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.broker.transaction.TransactionTestBase; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.transaction.Transaction; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; +import org.apache.pulsar.transaction.coordinator.TransactionMetadataStore; +import org.apache.pulsar.transaction.coordinator.TransactionMetadataStoreState; +import org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStore; +import org.awaitility.Awaitility; +import org.powermock.reflect.Whitebox; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +public class TopicTransactionBufferTest extends TransactionTestBase { + + + @BeforeMethod(alwaysRun = true) + protected void setup() throws Exception { + setBrokerCount(1); + setUpBase(1, 16, "persistent://" + NAMESPACE1 + "/test", 0); + + Map stores = + 
getPulsarServiceList().get(0).getTransactionMetadataStoreService().getStores(); + Awaitility.await().until(() -> { + if (stores.size() == 16) { + for (TransactionCoordinatorID transactionCoordinatorID : stores.keySet()) { + if (((MLTransactionMetadataStore) stores.get(transactionCoordinatorID)).getState() + != TransactionMetadataStoreState.State.Ready) { + return false; + } + } + return true; + } else { + return false; + } + }); + } + + @AfterMethod(alwaysRun = true) + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Test + public void testTransactionBufferAppendMarkerWriteFailState() throws Exception { + final String topic = "persistent://" + NAMESPACE1 + "/testPendingAckManageLedgerWriteFailState"; + Transaction txn = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build().get(); + + Producer producer = pulsarClient + .newProducer() + .topic(topic) + .sendTimeout(0, TimeUnit.SECONDS) + .enableBatching(false) + .create(); + + producer.newMessage(txn).value("test".getBytes()).send(); + PersistentTopic persistentTopic = (PersistentTopic) getPulsarServiceList().get(0) + .getBrokerService().getTopic(TopicName.get(topic).toString(), false).get().get(); + Whitebox.setInternalState(persistentTopic.getManagedLedger(), "state", ManagedLedgerImpl.State.WriteFailed); + txn.commit().get(); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferClientTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferClientTest.java index 0082d81f5fe41..66460778dc22d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferClientTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferClientTest.java @@ -27,17 +27,16 @@ import lombok.Cleanup; import java.util.ArrayList; import java.util.List; -import java.util.Optional; import 
java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import java.util.concurrent.Semaphore; -import org.apache.pulsar.broker.service.Topic; -import org.apache.pulsar.broker.service.persistent.PersistentSubscription; +import org.apache.pulsar.broker.PulsarServerException; +import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.transaction.TransactionTestBase; import org.apache.pulsar.broker.transaction.buffer.impl.TransactionBufferClientImpl; import org.apache.pulsar.broker.transaction.buffer.impl.TransactionBufferHandlerImpl; import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.transaction.TransactionBufferClient; import org.apache.pulsar.client.api.transaction.TransactionBufferClientException; @@ -49,6 +48,8 @@ import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.awaitility.Awaitility; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterClass; @@ -84,8 +85,8 @@ protected void setup() throws Exception { new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); admin.namespaces().createNamespace(namespace, 10); admin.topics().createPartitionedTopic(partitionedTopicName.getPartitionedTopicName(), partitions); - tbClient = TransactionBufferClientImpl.create(pulsarClient, - new HashedWheelTimer(new DefaultThreadFactory("transaction-buffer"))); + tbClient = TransactionBufferClientImpl.create(pulsarServiceList.get(0), + new HashedWheelTimer(new DefaultThreadFactory("transaction-buffer")), 1000, 3000); } @Override @@ -149,34 +150,42 @@ public void testAbortOnSubscription() throws ExecutionException, InterruptedExce @Test public void 
testTransactionBufferClientTimeout() throws Exception { - PulsarClientImpl mockClient = mock(PulsarClientImpl.class); + PulsarService pulsarService = pulsarServiceList.get(0); + PulsarClient mockClient = mock(PulsarClientImpl.class); CompletableFuture completableFuture = new CompletableFuture<>(); ClientCnx clientCnx = mock(ClientCnx.class); completableFuture.complete(clientCnx); - when(mockClient.getConnection(anyString())).thenReturn(completableFuture); + when(((PulsarClientImpl)mockClient).getConnection(anyString())).thenReturn(completableFuture); ChannelHandlerContext cnx = mock(ChannelHandlerContext.class); when(clientCnx.ctx()).thenReturn(cnx); Channel channel = mock(Channel.class); when(cnx.channel()).thenReturn(channel); + when(pulsarService.getClient()).thenAnswer(new Answer(){ + + @Override + public PulsarClient answer(InvocationOnMock invocation) throws Throwable { + return mockClient; + } + }); when(channel.isActive()).thenReturn(true); @Cleanup("stop") HashedWheelTimer hashedWheelTimer = new HashedWheelTimer(); TransactionBufferHandlerImpl transactionBufferHandler = - new TransactionBufferHandlerImpl(mockClient, hashedWheelTimer); + new TransactionBufferHandlerImpl(pulsarService, hashedWheelTimer, 1000, 3000); CompletableFuture endFuture = transactionBufferHandler.endTxnOnTopic("test", 1, 1, TxnAction.ABORT, 1); - Field field = TransactionBufferHandlerImpl.class.getDeclaredField("pendingRequests"); + Field field = TransactionBufferHandlerImpl.class.getDeclaredField("outstandingRequests"); field.setAccessible(true); - ConcurrentSkipListMap pendingRequests = + ConcurrentSkipListMap outstandingRequests = (ConcurrentSkipListMap) field.get(transactionBufferHandler); - assertEquals(pendingRequests.size(), 1); + assertEquals(outstandingRequests.size(), 1); Awaitility.await().atLeast(2, TimeUnit.SECONDS).until(() -> { - if (pendingRequests.size() == 0) { + if (outstandingRequests.size() == 0) { return true; } return false; @@ -191,23 +200,31 @@ public void 
testTransactionBufferClientTimeout() throws Exception { } @Test - public void testTransactionBufferChannelUnActive() { - PulsarClientImpl mockClient = mock(PulsarClientImpl.class); + public void testTransactionBufferChannelUnActive() throws PulsarServerException { + PulsarService pulsarService = pulsarServiceList.get(0); + PulsarClient mockClient = mock(PulsarClientImpl.class); CompletableFuture completableFuture = new CompletableFuture<>(); ClientCnx clientCnx = mock(ClientCnx.class); completableFuture.complete(clientCnx); - when(mockClient.getConnection(anyString())).thenReturn(completableFuture); + when(((PulsarClientImpl)mockClient).getConnection(anyString())).thenReturn(completableFuture); ChannelHandlerContext cnx = mock(ChannelHandlerContext.class); when(clientCnx.ctx()).thenReturn(cnx); Channel channel = mock(Channel.class); when(cnx.channel()).thenReturn(channel); when(channel.isActive()).thenReturn(false); + when(pulsarService.getClient()).thenAnswer(new Answer(){ + + @Override + public PulsarClient answer(InvocationOnMock invocation) throws Throwable { + return mockClient; + } + }); @Cleanup("stop") HashedWheelTimer hashedWheelTimer = new HashedWheelTimer(); TransactionBufferHandlerImpl transactionBufferHandler = - new TransactionBufferHandlerImpl(mockClient, hashedWheelTimer); + new TransactionBufferHandlerImpl(pulsarServiceList.get(0), hashedWheelTimer, 1000, 3000); try { transactionBufferHandler.endTxnOnTopic("test", 1, 1, TxnAction.ABORT, 1).get(); fail(); @@ -247,18 +264,8 @@ public void testTransactionBufferLookUp() throws Exception { } @Test - public void testTransactionBufferHandlerSemaphore() throws Exception { - - Field field = TransactionBufferClientImpl.class.getDeclaredField("tbHandler"); - field.setAccessible(true); - TransactionBufferHandlerImpl transactionBufferHandler = (TransactionBufferHandlerImpl) field.get(tbClient); - - field = TransactionBufferHandlerImpl.class.getDeclaredField("semaphore"); - field.setAccessible(true); - 
field.set(transactionBufferHandler, new Semaphore(2)); - - - String topic = "persistent://" + namespace + "/testTransactionBufferHandlerSemaphore"; + public void testTransactionBufferRequestCredits() throws Exception { + String topic = "persistent://" + namespace + "/testTransactionBufferRequestCredits"; String subName = "test"; String abortTopic = topic + "_abort_sub"; @@ -271,11 +278,17 @@ public void testTransactionBufferHandlerSemaphore() throws Exception { admin.topics().createSubscription(commitTopic, subName, MessageId.earliest); tbClient.abortTxnOnSubscription(abortTopic, "test", 1L, 1L, -1L).get(); - tbClient.commitTxnOnSubscription(commitTopic, "test", 1L, 1L, -1L).get(); tbClient.abortTxnOnTopic(abortTopic, 1L, 1L, -1L).get(); tbClient.commitTxnOnTopic(commitTopic, 1L, 1L, -1L).get(); + + assertEquals(tbClient.getAvailableRequestCredits(), 1000); + } + + @Test + public void testTransactionBufferPendingRequests() throws Exception { + } @Test @@ -302,22 +315,4 @@ public void testEndSubNotExist() throws Exception { tbClient.abortTxnOnSubscription(topic + "_abort_topic", sub, 1L, 1L, -1L).get(); tbClient.abortTxnOnSubscription(topic + "_commit_topic", sub, 1L, 1L, -1L).get(); } - - private void waitPendingAckInit(String topic, String sub) throws Exception { - - boolean exist = false; - for (int i = 0; i < getPulsarServiceList().size(); i++) { - CompletableFuture> completableFuture = getPulsarServiceList().get(i) - .getBrokerService().getTopics().get(topic); - if (completableFuture != null) { - PersistentSubscription persistentSubscription = - (PersistentSubscription) completableFuture.get().get().getSubscription(sub); - Awaitility.await().untilAsserted(() -> - assertEquals(persistentSubscription.getTransactionPendingAckStats().state, "Ready")); - exist = true; - } - } - - assertTrue(exist); - } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferCloseTest.java 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferCloseTest.java new file mode 100644 index 0000000000000..43d31e7f9ffcd --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferCloseTest.java @@ -0,0 +1,120 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.transaction.buffer; + +import com.google.common.collect.Sets; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.pulsar.broker.transaction.TransactionTestBase; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClient; +import org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.common.events.EventsTopicNames; +import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.naming.TopicDomain; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.PublisherStats; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.awaitility.Awaitility; +import org.testng.Assert; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Transaction buffer close test. 
+ */ +@Slf4j +@Test(groups = "broker") +public class TransactionBufferCloseTest extends TransactionTestBase { + + @BeforeMethod + protected void setup() throws Exception { + setUpBase(1, 16, null, 0); + Awaitility.await().until(() -> ((PulsarClientImpl) pulsarClient) + .getTcClient().getState() == TransactionCoordinatorClient.State.READY); + admin.tenants().createTenant(TENANT, + new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); + } + + @AfterMethod(alwaysRun = true) + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @DataProvider(name = "isPartition") + public Object[][] isPartition() { + return new Object[][]{ + { true }, { false } + }; + } + + @Test(timeOut = 10_000, dataProvider = "isPartition") + public void deleteTopicCloseTransactionBufferTest(boolean isPartition) throws Exception { + int expectedCount = isPartition ? 30 : 1; + TopicName topicName = createAndLoadTopic(isPartition, expectedCount); + checkSnapshotPublisherCount(topicName.getNamespace(), expectedCount); + if (isPartition) { + admin.topics().deletePartitionedTopic(topicName.getPartitionedTopicName(), true); + } else { + admin.topics().delete(topicName.getPartitionedTopicName(), true); + } + checkSnapshotPublisherCount(topicName.getNamespace(), 0); + } + + @Test(timeOut = 10_000, dataProvider = "isPartition") + public void unloadTopicCloseTransactionBufferTest(boolean isPartition) throws Exception { + int expectedCount = isPartition ? 
30 : 1; + TopicName topicName = createAndLoadTopic(isPartition, expectedCount); + checkSnapshotPublisherCount(topicName.getNamespace(), expectedCount); + admin.topics().unload(topicName.getPartitionedTopicName()); + checkSnapshotPublisherCount(topicName.getNamespace(), 0); + } + + private TopicName createAndLoadTopic(boolean isPartition, int partitionCount) + throws PulsarAdminException, PulsarClientException { + String namespace = TENANT + "/ns-" + RandomStringUtils.randomAlphabetic(5); + admin.namespaces().createNamespace(namespace, 3); + String topic = namespace + "/tb-close-test-"; + if (isPartition) { + admin.topics().createPartitionedTopic(topic, partitionCount); + } + pulsarClient.newProducer() + .topic(topic) + .sendTimeout(0, TimeUnit.SECONDS) + .create() + .close(); + return TopicName.get(topic); + } + + private void checkSnapshotPublisherCount(String namespace, int expectCount) throws PulsarAdminException { + TopicName snTopicName = TopicName.get(TopicDomain.persistent.value(), NamespaceName.get(namespace), + EventsTopicNames.TRANSACTION_BUFFER_SNAPSHOT); + List publisherStatsList = + (List) admin.topics() + .getStats(snTopicName.getPartitionedTopicName()).getPublishers(); + Assert.assertEquals(publisherStatsList.size(), expectCount); + } + +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferHandlerImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferHandlerImplTest.java new file mode 100644 index 0000000000000..af4e442f617a3 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferHandlerImplTest.java @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.transaction.buffer; + +import org.apache.pulsar.broker.PulsarServerException; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.namespace.NamespaceEphemeralData; +import org.apache.pulsar.broker.namespace.NamespaceService; +import org.apache.pulsar.broker.transaction.buffer.impl.TransactionBufferHandlerImpl; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.impl.ClientCnx; +import org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.common.api.proto.TxnAction; +import org.apache.pulsar.common.naming.NamespaceBundle; +import org.testng.annotations.Test; + +import java.util.Optional; +import java.util.concurrent.CompletableFuture; + +@Test(groups = "broker") +public class TransactionBufferHandlerImplTest { + + @Test + public void testRequestCredits() throws PulsarServerException { + PulsarClient pulsarClient = mock(PulsarClientImpl.class); + PulsarService pulsarService = mock(PulsarService.class); + 
NamespaceService namespaceService = mock(NamespaceService.class); + when(pulsarService.getNamespaceService()).thenReturn(namespaceService); + when(pulsarService.getClient()).thenReturn(pulsarClient); + when(namespaceService.getBundleAsync(any())).thenReturn(CompletableFuture.completedFuture(mock(NamespaceBundle.class))); + Optional opData = Optional.empty(); + when(namespaceService.getOwnerAsync(any())).thenReturn(CompletableFuture.completedFuture(opData)); + when(((PulsarClientImpl)pulsarClient).getConnection(anyString())).thenReturn(CompletableFuture.completedFuture(mock(ClientCnx.class))); + TransactionBufferHandlerImpl handler = spy(new TransactionBufferHandlerImpl(pulsarService, null, 1000, 3000)); + doNothing().when(handler).endTxn(any()); + doReturn(CompletableFuture.completedFuture(mock(ClientCnx.class))).when(handler).getClientCnx(anyString()); + for (int i = 0; i < 500; i++) { + handler.endTxnOnTopic("public/default/t", 1L, 1L, TxnAction.COMMIT, 1L); + } + assertEquals(handler.getAvailableRequestCredits(), 500); + for (int i = 0; i < 500; i++) { + handler.endTxnOnTopic("public/default/t", 1L, 1L, TxnAction.COMMIT, 1L); + } + assertEquals(handler.getAvailableRequestCredits(), 0); + handler.endTxnOnTopic("public/default/t", 1L, 1L, TxnAction.COMMIT, 1L); + assertEquals(handler.getPendingRequestsCount(), 1); + handler.onResponse(null); + assertEquals(handler.getAvailableRequestCredits(), 0); + assertEquals(handler.getPendingRequestsCount(), 0); + } + + @Test + public void testMinRequestCredits() throws PulsarServerException { + PulsarClient pulsarClient = mock(PulsarClientImpl.class); + PulsarService pulsarService = mock(PulsarService.class); + when(pulsarService.getClient()).thenReturn(pulsarClient); + TransactionBufferHandlerImpl handler = spy(new TransactionBufferHandlerImpl(pulsarService, null, 50, 3000)); + assertEquals(handler.getAvailableRequestCredits(), 100); + } +} diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferTest.java index 223849048d57f..c10444afe3c19 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionBufferTest.java @@ -33,11 +33,9 @@ import java.util.concurrent.ExecutionException; import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.broker.transaction.exception.buffer.TransactionBufferException; import org.apache.pulsar.broker.transaction.buffer.impl.InMemTransactionBufferProvider; import org.apache.pulsar.client.api.transaction.TxnID; -import org.apache.pulsar.broker.transaction.buffer.exceptions.TransactionNotFoundException; -import org.apache.pulsar.broker.transaction.buffer.exceptions.TransactionNotSealedException; -import org.apache.pulsar.broker.transaction.buffer.exceptions.TransactionStatusException; import org.apache.pulsar.transaction.coordinator.proto.TxnStatus; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -86,7 +84,7 @@ public void testOpenReaderOnNonExistentTxn() throws Exception { buffer.openTransactionBufferReader(txnId, 0L).get(); fail("Should fail to open reader if a transaction doesn't exist"); } catch (ExecutionException ee) { - assertTrue(ee.getCause() instanceof TransactionNotFoundException); + assertTrue(ee.getCause() instanceof TransactionBufferException.TransactionNotFoundException); } } @@ -102,7 +100,7 @@ public void testOpenReaderOnAnOpenTxn() throws Exception { buffer.openTransactionBufferReader(txnId, 0L).get(); fail("Should fail to open a reader on an OPEN transaction"); } catch (ExecutionException e) { - assertTrue(e.getCause() instanceof TransactionNotSealedException); + assertTrue(e.getCause() instanceof 
TransactionBufferException.TransactionNotSealedException); } } @@ -136,7 +134,7 @@ public void testCommitNonExistentTxn() throws Exception { buffer.commitTxn(txnId, Long.MIN_VALUE).get(); fail("Should fail to commit a transaction if it doesn't exist"); } catch (ExecutionException ee) { - assertTrue(ee.getCause() instanceof TransactionNotFoundException); + assertTrue(ee.getCause() instanceof TransactionBufferException.TransactionNotFoundException); } } @@ -160,7 +158,7 @@ public void testAbortNonExistentTxn() throws Exception { buffer.abortTxn(txnId, Long.MIN_VALUE).get(); fail("Should fail to abort a transaction if it doesn't exist"); } catch (ExecutionException ee) { - assertTrue(ee.getCause() instanceof TransactionNotFoundException); + assertTrue(ee.getCause() instanceof TransactionBufferException.TransactionNotFoundException); } } @@ -181,7 +179,7 @@ public void testAbortCommittedTxn() throws Exception { buffer.abortTxn(txnId, Long.MIN_VALUE).get(); fail("Should fail to abort a committed transaction"); } catch (ExecutionException e) { - assertTrue(e.getCause() instanceof TransactionStatusException); + assertTrue(e.getCause() instanceof TransactionBufferException.TransactionStatusException); } txnMeta = buffer.getTransactionMeta(txnId).get(); assertEquals(txnId, txnMeta.id()); @@ -277,7 +275,7 @@ private void verifyTxnNotExist(TxnID txnID) throws Exception { buffer.getTransactionMeta(txnID).get(); fail("Should fail to get transaction metadata if it doesn't exist"); } catch (ExecutionException ee) { - assertTrue(ee.getCause() instanceof TransactionNotFoundException); + assertTrue(ee.getCause() instanceof TransactionBufferException.TransactionNotFoundException); } } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionLowWaterMarkTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionLowWaterMarkTest.java index db9d4073b024f..3efdc2473bb21 100644 --- 
a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionLowWaterMarkTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionLowWaterMarkTest.java @@ -23,31 +23,28 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - -import com.google.common.collect.Sets; - import java.lang.reflect.Field; import java.util.HashMap; import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; - -import javax.validation.constraints.AssertTrue; +import java.util.concurrent.atomic.AtomicLong; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; - import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.commons.collections4.map.LinkedMap; import org.apache.pulsar.broker.service.BrokerService; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.transaction.TransactionTestBase; +import org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBuffer; import org.apache.pulsar.broker.transaction.pendingack.impl.PendingAckHandleImpl; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.Producer; -import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.api.transaction.Transaction; @@ -55,19 +52,15 @@ import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.transaction.TransactionImpl; -import 
org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.partition.PartitionedTopicMetadata; -import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.ClusterDataImpl; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; import org.apache.pulsar.transaction.coordinator.TransactionMetadataStore; import org.apache.pulsar.transaction.coordinator.TransactionMetadataStoreState; -import org.apache.pulsar.transaction.coordinator.exceptions.CoordinatorException; import org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStore; import org.awaitility.Awaitility; +import org.powermock.reflect.Whitebox; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -80,35 +73,12 @@ @Test(groups = "broker") public class TransactionLowWaterMarkTest extends TransactionTestBase { - private static final String TENANT = "tnx"; - private static final String NAMESPACE1 = TENANT + "/ns1"; - private static final String TOPIC = NAMESPACE1 + "/test-topic"; + private static final String TOPIC = "persistent://" + NAMESPACE1 + "/test-topic"; @BeforeMethod(alwaysRun = true) protected void setup() throws Exception { - setBrokerCount(1); - internalSetup(); - - String[] brokerServiceUrlArr = getPulsarServiceList().get(0).getBrokerServiceUrl().split(":"); - String webServicePort = brokerServiceUrlArr[brokerServiceUrlArr.length -1]; - admin.clusters().createCluster(CLUSTER_NAME, ClusterData.builder().serviceUrl("http://localhost:" + webServicePort).build()); - admin.tenants().createTenant(TENANT, - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NAMESPACE1); - 
admin.topics().createNonPartitionedTopic(TOPIC); - admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); - admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), 16); - - if (pulsarClient != null) { - pulsarClient.shutdown(); - } - pulsarClient = PulsarClient.builder() - .serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()) - .statsInterval(0, TimeUnit.SECONDS) - .enableTransaction(true) - .build(); + setUpBase(1, 16, TOPIC, 0); + Map stores = getPulsarServiceList().get(0).getTransactionMetadataStoreService().getStores(); Awaitility.await().until(() -> { @@ -248,7 +218,7 @@ public void testPendingAckLowWaterMark() throws Exception { ConcurrentOpenHashMap>> topics = (ConcurrentOpenHashMap>>) field .get(getPulsarServiceList().get(i).getBrokerService()); - CompletableFuture> completableFuture = topics.get("persistent://" + TOPIC); + CompletableFuture> completableFuture = topics.get(TOPIC); if (completableFuture != null) { Optional topic = completableFuture.get(); if (topic.isPresent()) { @@ -320,4 +290,183 @@ public void testPendingAckLowWaterMark() throws Exception { fail(); } } + + @Test + public void testTBLowWaterMarkEndToEnd() throws Exception { + Transaction txn1 = pulsarClient.newTransaction() + .withTransactionTimeout(500, TimeUnit.SECONDS) + .build().get(); + Transaction txn2 = pulsarClient.newTransaction() + .withTransactionTimeout(500, TimeUnit.SECONDS) + .build().get(); + while (txn2.getTxnID().getMostSigBits() != txn1.getTxnID().getMostSigBits()) { + txn2 = pulsarClient.newTransaction() + .withTransactionTimeout(500, TimeUnit.SECONDS) + .build().get(); + } + + @Cleanup + Producer producer = pulsarClient + .newProducer() + .topic(TOPIC) + .sendTimeout(0, TimeUnit.SECONDS) + .enableBatching(false) + .create(); + + 
producer.newMessage(txn1).send(); + producer.newMessage(txn2).send(); + + txn1.commit().get(); + txn2.commit().get(); + + Field field = TransactionImpl.class.getDeclaredField("state"); + field.setAccessible(true); + field.set(txn1, TransactionImpl.State.OPEN); + + AtomicLong pendingWriteOps = Whitebox.getInternalState(getPulsarServiceList().get(0) + .getBrokerService().getTopic(TopicName.get(TOPIC).toString(), + false).get().get(), "pendingWriteOps"); + try { + producer.newMessage(txn1).send(); + fail(); + } catch (PulsarClientException.NotAllowedException ignore) { + // no-op + } + + assertEquals(pendingWriteOps.get(), 0); + } + + @Test + public void testLowWaterMarkForDifferentTC() throws Exception { + String subName = "sub"; + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(TOPIC) + .sendTimeout(0, TimeUnit.SECONDS) + .create(); + @Cleanup + Consumer consumer = pulsarClient.newConsumer() + .topic(TOPIC) + .subscriptionName(subName) + .subscribe(); + + Transaction txn1 = pulsarClient.newTransaction() + .withTransactionTimeout(500, TimeUnit.SECONDS) + .build().get(); + Transaction txn2 = pulsarClient.newTransaction() + .withTransactionTimeout(500, TimeUnit.SECONDS) + .build().get(); + while (txn2.getTxnID().getMostSigBits() == txn1.getTxnID().getMostSigBits()) { + txn2 = pulsarClient.newTransaction() + .withTransactionTimeout(500, TimeUnit.SECONDS) + .build().get(); + } + Transaction txn3 = pulsarClient.newTransaction() + .withTransactionTimeout(500, TimeUnit.SECONDS) + .build().get(); + while (txn3.getTxnID().getMostSigBits() != txn2.getTxnID().getMostSigBits()) { + txn3 = pulsarClient.newTransaction() + .withTransactionTimeout(500, TimeUnit.SECONDS) + .build().get(); + } + + Transaction txn4 = pulsarClient.newTransaction() + .withTransactionTimeout(500, TimeUnit.SECONDS) + .build().get(); + while (txn4.getTxnID().getMostSigBits() != txn1.getTxnID().getMostSigBits()) { + txn4 = pulsarClient.newTransaction() + .withTransactionTimeout(500, 
TimeUnit.SECONDS) + .build().get(); + } + + for (int i = 0; i < 10; i++) { + producer.newMessage().send(); + } + + producer.newMessage(txn1).send(); + producer.newMessage(txn2).send(); + producer.newMessage(txn3).send(); + producer.newMessage(txn4).send(); + + Message message1 = consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledgeAsync(message1.getMessageId(), txn1); + Message message2 = consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledgeAsync(message2.getMessageId(), txn2); + Message message3 = consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledgeAsync(message3.getMessageId(), txn3); + Message message4 = consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledgeAsync(message4.getMessageId(), txn4); + + txn1.commit().get(); + txn2.commit().get(); + + Field field = TransactionImpl.class.getDeclaredField("state"); + field.setAccessible(true); + field.set(txn1, TransactionImpl.State.OPEN); + field.set(txn2, TransactionImpl.State.OPEN); + + producer.newMessage(txn1).send(); + producer.newMessage(txn2).send(); + + Message message5 = consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledgeAsync(message5.getMessageId(), txn1); + Message message6 = consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledgeAsync(message6.getMessageId(), txn2); + + txn3.commit().get(); + TxnID txnID1 = txn1.getTxnID(); + TxnID txnID2 = txn2.getTxnID(); + Awaitility.await().untilAsserted(() -> { + assertTrue(checkTxnIsOngoingInTP(txnID1, subName)); + assertTrue(checkTxnIsOngoingInTP(txnID2, subName)); + assertTrue(checkTxnIsOngoingInTB(txnID1)); + assertTrue(checkTxnIsOngoingInTB(txnID2)); + }); + + txn4.commit().get(); + + Awaitility.await().untilAsserted(() -> { + assertFalse(checkTxnIsOngoingInTP(txnID1, subName)); + assertFalse(checkTxnIsOngoingInTP(txnID2, subName)); + assertFalse(checkTxnIsOngoingInTB(txnID1)); + assertFalse(checkTxnIsOngoingInTB(txnID2)); + }); + } + + private boolean checkTxnIsOngoingInTP(TxnID txnID, String subName) throws 
Exception { + PersistentTopic persistentTopic = (PersistentTopic) getPulsarServiceList().get(0) + .getBrokerService() + .getTopic(TopicName.get(TOPIC).toString(), false) + .get().get(); + + PersistentSubscription persistentSubscription = persistentTopic.getSubscription(subName); + + Field field1 = PersistentSubscription.class.getDeclaredField("pendingAckHandle"); + field1.setAccessible(true); + PendingAckHandleImpl pendingAckHandle = (PendingAckHandleImpl) field1.get(persistentSubscription); + + Field field2 = PendingAckHandleImpl.class.getDeclaredField("individualAckOfTransaction"); + field2.setAccessible(true); + LinkedMap> individualAckOfTransaction = + (LinkedMap>) field2.get(pendingAckHandle); + return individualAckOfTransaction.containsKey(txnID); + } + + private boolean checkTxnIsOngoingInTB(TxnID txnID) throws Exception { + PersistentTopic persistentTopic = (PersistentTopic) getPulsarServiceList().get(0) + .getBrokerService() + .getTopic(TopicName.get(TOPIC).toString(), false) + .get().get(); + + TopicTransactionBuffer topicTransactionBuffer = + (TopicTransactionBuffer) persistentTopic.getTransactionBuffer(); + Field field3 = TopicTransactionBuffer.class.getDeclaredField("ongoingTxns"); + field3.setAccessible(true); + LinkedMap ongoingTxns = + (LinkedMap) field3.get(topicTransactionBuffer); + return ongoingTxns.containsKey(txnID); + + } + + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionStablePositionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionStablePositionTest.java index e43f2625c3306..0184b2725d733 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionStablePositionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/buffer/TransactionStablePositionTest.java @@ -18,33 +18,37 @@ */ package org.apache.pulsar.broker.transaction.buffer; +import static 
org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBufferState.State.NoSnapshot; +import static org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBufferState.State.Ready; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; - -import com.google.common.collect.Sets; - +import java.lang.reflect.Field; +import java.lang.reflect.Method; import java.util.concurrent.TimeUnit; - +import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; - +import org.apache.bookkeeper.mledger.Position; +import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.transaction.TransactionTestBase; +import org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBuffer; +import org.apache.pulsar.broker.transaction.buffer.impl.TopicTransactionBufferState; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.api.transaction.Transaction; import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClient; +import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; -import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.ClusterDataImpl; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.awaitility.Awaitility; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; 
/** @@ -54,35 +58,11 @@ @Test(groups = "broker") public class TransactionStablePositionTest extends TransactionTestBase { - private static final String TENANT = "tnx"; - private static final String NAMESPACE1 = TENANT + "/ns1"; private static final String TOPIC = NAMESPACE1 + "/test-topic"; @BeforeMethod protected void setup() throws Exception { - internalSetup(); - - String[] brokerServiceUrlArr = getPulsarServiceList().get(0).getBrokerServiceUrl().split(":"); - String webServicePort = brokerServiceUrlArr[brokerServiceUrlArr.length -1]; - admin.clusters().createCluster(CLUSTER_NAME, ClusterData.builder().serviceUrl("http://localhost:" + webServicePort).build()); - admin.tenants().createTenant(TENANT, - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NAMESPACE1); - admin.topics().createNonPartitionedTopic(TOPIC); - admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); - admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), 16); - - if (pulsarClient != null) { - pulsarClient.shutdown(); - } - pulsarClient = PulsarClient.builder() - .serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()) - .statsInterval(0, TimeUnit.SECONDS) - .enableTransaction(true) - .build(); - + setUpBase(1, 16, TOPIC, 0); Awaitility.await().until(() -> ((PulsarClientImpl) pulsarClient) .getTcClient().getState() == TransactionCoordinatorClient.State.READY); } @@ -181,4 +161,82 @@ public void abortTxnTest() throws Exception { assertNull(message); } + @DataProvider(name = "enableTransactionAndState") + public static Object[][] enableTransactionAndState() { + return new Object[][] { + { true, TopicTransactionBufferState.State.None }, + { false, TopicTransactionBufferState.State.None }, + { true, 
TopicTransactionBufferState.State.Initializing }, + { false, TopicTransactionBufferState.State.Initializing } + }; + } + + @Test(dataProvider = "enableTransactionAndState") + public void testSyncNormalPositionWhenTBRecover(boolean clientEnableTransaction, + TopicTransactionBufferState.State state) throws Exception { + + final String topicName = NAMESPACE1 + "/testSyncNormalPositionWhenTBRecover-" + + clientEnableTransaction + state.name(); + pulsarClient = PulsarClient.builder() + .serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()) + .statsInterval(0, TimeUnit.SECONDS) + .enableTransaction(clientEnableTransaction) + .build(); + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.BYTES) + .sendTimeout(0, TimeUnit.SECONDS) + .topic(topicName) + .create(); + + PersistentTopic persistentTopic = (PersistentTopic) getPulsarServiceList().get(0).getBrokerService() + .getTopic(TopicName.get(topicName).toString(), false).get().get(); + + TopicTransactionBuffer topicTransactionBuffer = (TopicTransactionBuffer) persistentTopic.getTransactionBuffer(); + + // wait topic transaction buffer recover success + checkTopicTransactionBufferState(clientEnableTransaction, topicTransactionBuffer); + + Field field = TopicTransactionBufferState.class.getDeclaredField("state"); + field.setAccessible(true); + field.set(topicTransactionBuffer, state); + + // init maxReadPosition is PositionImpl.EARLIEST + Position position = topicTransactionBuffer.getMaxReadPosition(); + assertEquals(position, PositionImpl.earliest); + + MessageIdImpl messageId = (MessageIdImpl) producer.send("test".getBytes()); + + // send normal message can't change MaxReadPosition when state is None or Initializing + position = topicTransactionBuffer.getMaxReadPosition(); + assertEquals(position, PositionImpl.earliest); + + // invoke recover + Method method = TopicTransactionBuffer.class.getDeclaredMethod("recover"); + method.setAccessible(true); + method.invoke(topicTransactionBuffer); + + // change 
to None state can recover + field.set(topicTransactionBuffer, TopicTransactionBufferState.State.None); + + // recover success again + checkTopicTransactionBufferState(clientEnableTransaction, topicTransactionBuffer); + + // change MaxReadPosition to normal message position + assertEquals(PositionImpl.get(messageId.getLedgerId(), messageId.getEntryId()), + topicTransactionBuffer.getMaxReadPosition()); + } + + private void checkTopicTransactionBufferState(boolean clientEnableTransaction, + TopicTransactionBuffer topicTransactionBuffer) { + // recover success + Awaitility.await().until(() -> { + if (clientEnableTransaction) { + // recover success, client enable transaction will change to Ready State + return topicTransactionBuffer.getStats().state.equals(Ready.name()); + } else { + // recover success, client disable transaction will change to NoSnapshot State + return topicTransactionBuffer.getStats().state.equals(NoSnapshot.name()); + } + }); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/coordinator/TransactionMetaStoreAssignmentTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/coordinator/TransactionMetaStoreAssignmentTest.java index 172530566a98b..01027868e272b 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/coordinator/TransactionMetaStoreAssignmentTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/coordinator/TransactionMetaStoreAssignmentTest.java @@ -20,15 +20,11 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.collect.Sets; import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.transaction.TransactionTestBase; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.ServiceUrlProvider; -import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; -import 
org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; import org.awaitility.Awaitility; import org.testng.Assert; @@ -41,15 +37,7 @@ public class TransactionMetaStoreAssignmentTest extends TransactionTestBase { @Override @BeforeMethod(alwaysRun = true) protected void setup() throws Exception { - setBrokerCount(3); - super.internalSetup(); - String[] brokerServiceUrlArr = getPulsarServiceList().get(0).getBrokerServiceUrl().split(":"); - String webServicePort = brokerServiceUrlArr[brokerServiceUrlArr.length -1]; - admin.clusters().createCluster(CLUSTER_NAME, ClusterData.builder().serviceUrl("http://localhost:" + webServicePort).build()); - admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); - admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), 16); + setUpBase(3, 16, null, 0); pulsarClient.close(); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/coordinator/TransactionMetaStoreTestBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/coordinator/TransactionMetaStoreTestBase.java index 04579078ad4af..b012dfa87d29a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/coordinator/TransactionMetaStoreTestBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/coordinator/TransactionMetaStoreTestBase.java @@ -34,6 +34,8 @@ import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; + public abstract class TransactionMetaStoreTestBase extends TestRetrySupport { private static final Logger log = 
LoggerFactory.getLogger(TransactionMetaStoreTestBase.class); @@ -76,7 +78,7 @@ protected final void setup() throws Exception { config.setTransactionCoordinatorEnabled(true); configurations[i] = config; - pulsarServices[i] = Mockito.spy(new PulsarService(config)); + pulsarServices[i] = spyWithClassAndConstructorArgs(PulsarService.class, config); pulsarServices[i].start(); pulsarAdmins[i] = PulsarAdmin.builder() diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckInMemoryDeleteTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckInMemoryDeleteTest.java index fc952c42baedb..da2a3a940bd2c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckInMemoryDeleteTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckInMemoryDeleteTest.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.broker.transaction.pendingack; -import com.google.common.collect.Sets; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; @@ -35,21 +34,11 @@ import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; -import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.api.transaction.Transaction; import org.apache.pulsar.client.api.transaction.TxnID; -import org.apache.pulsar.common.naming.NamespaceName; -import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.ClusterDataImpl; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.util.collections.BitSetRecyclable; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; -import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; -import 
org.apache.pulsar.transaction.coordinator.TransactionMetadataStore; -import org.apache.pulsar.transaction.coordinator.TransactionMetadataStoreState; -import org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStore; import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterMethod; @@ -58,7 +47,6 @@ import java.lang.reflect.Field; import java.util.HashMap; -import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentSkipListMap; @@ -71,39 +59,11 @@ @Test(groups = "broker") public class PendingAckInMemoryDeleteTest extends TransactionTestBase { - private static final String TENANT = "tnx"; - private static final String NAMESPACE1 = TENANT + "/ns1"; private static final int NUM_PARTITIONS = 16; @BeforeMethod protected void setup() throws Exception { - setBrokerCount(1); - internalSetup(); - - String[] brokerServiceUrlArr = getPulsarServiceList().get(0).getBrokerServiceUrl().split(":"); - String webServicePort = brokerServiceUrlArr[brokerServiceUrlArr.length -1]; - admin.clusters().createCluster(CLUSTER_NAME, ClusterData.builder().serviceUrl("http://localhost:" + webServicePort).build()); - admin.tenants().createTenant(TENANT, - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - - admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NAMESPACE1); - admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); - admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), NUM_PARTITIONS); - - if (pulsarClient != null) { - pulsarClient.shutdown(); - } - pulsarClient = PulsarClient.builder() - .serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()) - .statsInterval(0, TimeUnit.SECONDS) - .enableTransaction(true) - .build(); - - 
Map stores = - getPulsarServiceList().get(0).getTransactionMetadataStoreService().getStores(); - // wait tc init success to ready state - waitForCoordinatorToBeAvailable(NUM_PARTITIONS); + conf.setAcknowledgmentAtBatchIndexLevelEnabled(true); + setUpBase(1, NUM_PARTITIONS, NAMESPACE1 +"/test", 0); } @AfterMethod(alwaysRun = true) diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckMetadataTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckMetadataTest.java new file mode 100644 index 0000000000000..14dbcdb8897f6 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckMetadataTest.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.transaction.pendingack; + +import lombok.Cleanup; +import org.apache.bookkeeper.mledger.AsyncCallbacks; +import org.apache.bookkeeper.mledger.ManagedCursor; +import org.apache.bookkeeper.mledger.ManagedLedger; +import org.apache.bookkeeper.mledger.ManagedLedgerException; +import org.apache.bookkeeper.mledger.ManagedLedgerFactory; +import org.apache.bookkeeper.mledger.ManagedLedgerFactoryConfig; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerFactoryImpl; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.bookkeeper.test.MockedBookKeeperTestCase; +import org.apache.pulsar.broker.transaction.pendingack.impl.MLPendingAckStore; +import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.common.api.proto.CommandAck; +import org.testng.annotations.Test; +import java.lang.reflect.Field; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; +import static org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.State.WriteFailed; +import static org.testng.Assert.assertTrue; +import static org.testng.AssertJUnit.fail; + +public class PendingAckMetadataTest extends MockedBookKeeperTestCase { + + public PendingAckMetadataTest() { + super(3); + } + + @Test + public void testPendingAckManageLedgerWriteFailState() throws Exception { + ManagedLedgerFactoryConfig factoryConf = new ManagedLedgerFactoryConfig(); + factoryConf.setMaxCacheSize(0); + + String pendingAckTopicName = MLPendingAckStore + .getTransactionPendingAckStoreSuffix("test", "test"); + @Cleanup("shutdown") + ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(metadataStore, bkc, factoryConf); + + CompletableFuture completableFuture = new CompletableFuture<>(); + factory.asyncOpen(pendingAckTopicName, new AsyncCallbacks.OpenLedgerCallback() { + @Override + public void openLedgerComplete(ManagedLedger ledger, 
Object ctx) { + completableFuture.complete(ledger); + } + + @Override + public void openLedgerFailed(ManagedLedgerException exception, Object ctx) { + + } + }, null); + + ManagedCursor cursor = completableFuture.get().openCursor("test"); + ManagedCursor subCursor = completableFuture.get().openCursor("test"); + MLPendingAckStore pendingAckStore = + new MLPendingAckStore(completableFuture.get(), cursor, subCursor, 500); + + Field field = MLPendingAckStore.class.getDeclaredField("managedLedger"); + field.setAccessible(true); + ManagedLedgerImpl managedLedger = (ManagedLedgerImpl) field.get(pendingAckStore); + field = ManagedLedgerImpl.class.getDeclaredField("STATE_UPDATER"); + field.setAccessible(true); + AtomicReferenceFieldUpdater state = + (AtomicReferenceFieldUpdater) field.get(managedLedger); + state.set(managedLedger, WriteFailed); + try { + pendingAckStore.appendAbortMark(new TxnID(1, 1), CommandAck.AckType.Cumulative).get(); + fail(); + } catch (ExecutionException e) { + assertTrue(e.getCause().getCause() instanceof ManagedLedgerException.ManagedLedgerAlreadyClosedException); + } + pendingAckStore.appendAbortMark(new TxnID(1, 1), CommandAck.AckType.Cumulative).get(); + + completableFuture.get().close(); + cursor.close(); + subCursor.close(); + } + +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckPersistentTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckPersistentTest.java index 3820ebc6cee79..b85f4efd133a3 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckPersistentTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/transaction/pendingack/PendingAckPersistentTest.java @@ -21,17 +21,20 @@ import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; -import com.google.common.collect.Sets; import java.lang.reflect.Field; +import 
java.lang.reflect.Method; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.mledger.ManagedCursor; import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.commons.collections4.map.LinkedMap; import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.broker.transaction.TransactionTestBase; @@ -41,17 +44,20 @@ import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; -import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.api.transaction.Transaction; +import org.apache.pulsar.client.impl.BatchMessageIdImpl; +import org.apache.pulsar.client.impl.MessageIdImpl; +import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.client.impl.transaction.TransactionImpl; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.policies.data.ClusterDataImpl; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicStats; import org.awaitility.Awaitility; +import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -60,38 +66,16 @@ * Test for consuming transaction messages. 
*/ @Slf4j +@Test(groups = "broker") public class PendingAckPersistentTest extends TransactionTestBase { - private static final String PENDING_ACK_REPLAY_TOPIC = "persistent://public/txn/pending-ack-replay"; - - private static final String NAMESPACE = "public/txn"; + private static final String PENDING_ACK_REPLAY_TOPIC = NAMESPACE1 + "/pending-ack-replay"; private static final int NUM_PARTITIONS = 16; @BeforeMethod public void setup() throws Exception { - setBrokerCount(1); - super.internalSetup(); - - String[] brokerServiceUrlArr = getPulsarServiceList().get(0).getBrokerServiceUrl().split(":"); - String webServicePort = brokerServiceUrlArr[brokerServiceUrlArr.length -1]; - admin.clusters().createCluster(CLUSTER_NAME, ClusterDataImpl.builder().serviceUrl("http://localhost:" + webServicePort).build()); - admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); - admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), 16); - admin.tenants().createTenant("public", - new TenantInfoImpl(Sets.newHashSet(), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NAMESPACE, 10); - admin.topics().createNonPartitionedTopic(PENDING_ACK_REPLAY_TOPIC); - - pulsarClient = PulsarClient.builder() - .serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()) - .statsInterval(0, TimeUnit.SECONDS) - .enableTransaction(true) - .build(); - // wait tc init success to ready state - waitForCoordinatorToBeAvailable(NUM_PARTITIONS); + setUpBase(1, NUM_PARTITIONS, PENDING_ACK_REPLAY_TOPIC, 0); } @AfterMethod(alwaysRun = true) @@ -217,6 +201,8 @@ public void individualPendingAckReplayTest() throws Exception { @Test public void cumulativePendingAckReplayTest() throws Exception { int messageCount = 1000; + 
getPulsarServiceList().get(0).getConfig().setTransactionPendingAckLogIndexMinLag(4 * messageCount + 2); + getPulsarServiceList().get(0).getConfiguration().setManagedLedgerDefaultMarkDeleteRateLimit(10); String subName = "cumulative-test"; @Cleanup @@ -303,7 +289,7 @@ public void cumulativePendingAckReplayTest() throws Exception { // in order to check out the pending ack cursor is clear whether or not. Awaitility.await() .until(() -> ((PositionImpl) managedCursor.getMarkDeletedPosition()) - .compareTo((PositionImpl) managedCursor.getManagedLedger().getLastConfirmedEntry()) == -1); + .compareTo((PositionImpl) managedCursor.getManagedLedger().getLastConfirmedEntry()) == 0); } @Test @@ -312,7 +298,7 @@ private void testDeleteSubThenDeletePendingAckManagedLedger() throws Exception { String subName = "test-delete"; String topic = TopicName.get(TopicDomain.persistent.toString(), - NamespaceName.get(NAMESPACE), "test-delete").toString(); + NamespaceName.get(NAMESPACE1), "test-delete").toString(); @Cleanup Consumer consumer = pulsarClient.newConsumer() .topic(topic) @@ -325,7 +311,7 @@ private void testDeleteSubThenDeletePendingAckManagedLedger() throws Exception { admin.topics().deleteSubscription(topic, subName); - List topics = admin.namespaces().getTopics(NAMESPACE); + List topics = admin.namespaces().getTopics(NAMESPACE1); TopicStats topicStats = admin.topics().getStats(topic, false); @@ -341,7 +327,7 @@ private void testDeleteTopicThenDeletePendingAckManagedLedger() throws Exception String subName2 = "test-delete"; String topic = TopicName.get(TopicDomain.persistent.toString(), - NamespaceName.get(NAMESPACE), "test-delete").toString(); + NamespaceName.get(NAMESPACE1), "test-delete").toString(); @Cleanup Consumer consumer1 = pulsarClient.newConsumer() .topic(topic) @@ -364,10 +350,354 @@ private void testDeleteTopicThenDeletePendingAckManagedLedger() throws Exception admin.topics().delete(topic); - List topics = admin.namespaces().getTopics(NAMESPACE); + List topics = 
admin.namespaces().getTopics(NAMESPACE1); assertFalse(topics.contains(MLPendingAckStore.getTransactionPendingAckStoreSuffix(topic, subName1))); assertFalse(topics.contains(MLPendingAckStore.getTransactionPendingAckStoreSuffix(topic, subName2))); assertFalse(topics.contains(topic)); } + + @Test + public void testDeleteUselessLogDataWhenSubCursorMoved() throws Exception { + getPulsarServiceList().get(0).getConfig().setTransactionPendingAckLogIndexMinLag(5); + getPulsarServiceList().get(0).getConfiguration().setManagedLedgerDefaultMarkDeleteRateLimit(5); + String subName = "test-log-delete"; + String topic = TopicName.get(TopicDomain.persistent.toString(), + NamespaceName.get(NAMESPACE1), "test-log-delete").toString(); + + @Cleanup + Consumer consumer = pulsarClient.newConsumer() + .topic(topic) + .subscriptionName(subName) + .subscribe(); + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topic) + .sendTimeout(0, TimeUnit.SECONDS) + .enableBatching(false) + .create(); + + for (int i = 0; i < 20; i++) { + producer.newMessage().send(); + } + // init + Message message = consumer.receive(5, TimeUnit.SECONDS); + Transaction transaction = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + consumer.acknowledgeAsync(message.getMessageId(), transaction).get(); + + PersistentTopic persistentTopic = (PersistentTopic) getPulsarServiceList().get(0) + .getBrokerService().getTopic(topic, false).get().get(); + + PersistentSubscription persistentSubscription = persistentTopic.getSubscription(subName); + Field field = PersistentSubscription.class.getDeclaredField("pendingAckHandle"); + field.setAccessible(true); + PendingAckHandleImpl pendingAckHandle = (PendingAckHandleImpl) field.get(persistentSubscription); + Field field1 = PendingAckHandleImpl.class.getDeclaredField("pendingAckStoreFuture"); + field1.setAccessible(true); + PendingAckStore pendingAckStore = ((CompletableFuture) field1.get(pendingAckHandle)).get(); + 
+ Field field3 = MLPendingAckStore.class.getDeclaredField("pendingAckLogIndex"); + Field field4 = MLPendingAckStore.class.getDeclaredField("maxIndexLag"); + + field3.setAccessible(true); + field4.setAccessible(true); + + ConcurrentSkipListMap pendingAckLogIndex = + (ConcurrentSkipListMap) field3.get(pendingAckStore); + long maxIndexLag = (long) field4.get(pendingAckStore); + Assert.assertEquals(pendingAckLogIndex.size(), 0); + Assert.assertEquals(maxIndexLag, 5); + transaction.commit().get(); + + Awaitility.await().untilAsserted(() -> + Assert.assertEquals(persistentSubscription.getCursor().getPersistentMarkDeletedPosition().getEntryId(), + ((MessageIdImpl)message.getMessageId()).getEntryId())); + // 7 more acks. Will find that there are still only two records in the map. + Transaction transaction1 = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + Message message0 = null; + //remove previous index + for (int i = 0; i < 4; i++) { + message0 = consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledgeAsync(message0.getMessageId(), transaction1).get(); + } + Assert.assertEquals(pendingAckLogIndex.size(), 1); + maxIndexLag = (long) field4.get(pendingAckStore); + Assert.assertEquals(maxIndexLag, 5); + //add new index + for (int i = 0; i < 9; i++) { + message0= consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledgeAsync(message0.getMessageId(), transaction1).get(); + } + + Assert.assertEquals(pendingAckLogIndex.size(), 2); + maxIndexLag = (long) field4.get(pendingAckStore); + Assert.assertEquals(maxIndexLag, 10); + + transaction1.commit().get(); + Message message1 = message0; + Awaitility.await().untilAsserted(() -> + Assert.assertEquals(persistentSubscription.getCursor().getPersistentMarkDeletedPosition().getEntryId(), + ((MessageIdImpl)message1.getMessageId()).getEntryId())); + + Transaction transaction2 = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + 
Message message2 = consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledgeAsync(message2.getMessageId(), transaction2).get(); + + Assert.assertEquals(pendingAckLogIndex.size(), 0); + maxIndexLag = (long) field4.get(pendingAckStore); + Assert.assertEquals(maxIndexLag, 5); + } + + @Test + public void testPendingAckLowWaterMarkRemoveFirstTxn() throws Exception { + String topic = TopicName.get(TopicDomain.persistent.toString(), + NamespaceName.get(NAMESPACE1), "test").toString(); + + String subName = "subName"; + + @Cleanup + Consumer consumer = pulsarClient.newConsumer() + .topic(topic) + .subscriptionName(subName) + .subscriptionType(SubscriptionType.Failover) + .enableBatchIndexAcknowledgment(true) + .subscribe(); + + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topic) + .sendTimeout(0, TimeUnit.SECONDS) + .create(); + + for (int i = 0; i < 5; i++) { + producer.newMessage().send(); + } + + Transaction transaction1 = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + + Message message1 = consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledgeAsync(message1.getMessageId(), transaction1); + transaction1.commit().get(); + + + Transaction transaction2 = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + while (transaction1.getTxnID().getMostSigBits() != transaction2.getTxnID().getMostSigBits()) { + transaction2 = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + } + + Transaction transaction3 = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + while (transaction1.getTxnID().getMostSigBits() != transaction3.getTxnID().getMostSigBits()) { + transaction3 = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + } + + Message message3 = consumer.receive(5, TimeUnit.SECONDS); + 
consumer.acknowledgeAsync(message3.getMessageId(), transaction2); + transaction2.commit().get(); + + Message message2 = consumer.receive(5, TimeUnit.SECONDS); + + Field field = TransactionImpl.class.getDeclaredField("state"); + field.setAccessible(true); + field.set(transaction1, TransactionImpl.State.OPEN); + + consumer.acknowledgeAsync(message2.getMessageId(), transaction1).get(); + Message message4 = consumer.receive(5, TimeUnit.SECONDS); + field.set(transaction2, TransactionImpl.State.OPEN); + consumer.acknowledgeAsync(message4.getMessageId(), transaction2).get(); + + Message message5 = consumer.receive(5, TimeUnit.SECONDS); + consumer.acknowledgeAsync(message5.getMessageId(), transaction3); + transaction3.commit().get(); + + + PersistentTopic persistentTopic = + (PersistentTopic) getPulsarServiceList() + .get(0) + .getBrokerService() + .getTopic(topic, false) + .get() + .get(); + + PersistentSubscription persistentSubscription = persistentTopic.getSubscription(subName); + Field field1 = PersistentSubscription.class.getDeclaredField("pendingAckHandle"); + field1.setAccessible(true); + PendingAckHandleImpl oldPendingAckHandle = (PendingAckHandleImpl) field1.get(persistentSubscription); + Field field2 = PendingAckHandleImpl.class.getDeclaredField("individualAckOfTransaction"); + field2.setAccessible(true); + LinkedMap> oldIndividualAckOfTransaction = + (LinkedMap>) field2.get(oldPendingAckHandle); + Awaitility.await().untilAsserted(() -> Assert.assertEquals(oldIndividualAckOfTransaction.size(), 0)); + + PendingAckHandleImpl pendingAckHandle = new PendingAckHandleImpl(persistentSubscription); + + Method method = PendingAckHandleImpl.class.getDeclaredMethod("initPendingAckStore"); + method.setAccessible(true); + method.invoke(pendingAckHandle); + + Field field3 = PendingAckHandleImpl.class.getDeclaredField("pendingAckStoreFuture"); + field3.setAccessible(true); + + Awaitility.await().until(() -> { + CompletableFuture completableFuture = + (CompletableFuture) 
field3.get(pendingAckHandle); + completableFuture.get(); + return true; + }); + + + LinkedMap> individualAckOfTransaction = + (LinkedMap>) field2.get(pendingAckHandle); + + assertFalse(individualAckOfTransaction.containsKey(transaction1.getTxnID())); + assertFalse(individualAckOfTransaction.containsKey(transaction2.getTxnID())); + + } + + @Test + public void testTransactionConflictExceptionWhenAckBatchMessage() throws Exception { + String topic = TopicName.get(TopicDomain.persistent.toString(), + NamespaceName.get(NAMESPACE1), "test").toString(); + + String subscriptionName = "my-subscription-batch"; + pulsarServiceList.get(0).getBrokerService() + .getManagedLedgerConfig(TopicName.get(topic)).get() + .setDeletionAtBatchIndexLevelEnabled(true); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(true) + .batchingMaxMessages(3) + // set batch max publish delay big enough to make sure entry has 3 messages + .batchingMaxPublishDelay(10, TimeUnit.SECONDS) + .topic(topic).create(); + + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .subscriptionName(subscriptionName) + .enableBatchIndexAcknowledgment(true) + .subscriptionType(SubscriptionType.Exclusive) + .isAckReceiptEnabled(true) + .topic(topic) + .subscribe(); + + List messageIds = new ArrayList<>(); + List> futureMessageIds = new ArrayList<>(); + + List messages = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + String message = "my-message-" + i; + messages.add(message); + CompletableFuture messageIdCompletableFuture = producer.sendAsync(message); + futureMessageIds.add(messageIdCompletableFuture); + } + + for (CompletableFuture futureMessageId : futureMessageIds) { + MessageId messageId = futureMessageId.get(); + messageIds.add(messageId); + } + + Transaction transaction = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.DAYS) + .build() + .get(); + + Message message1 = consumer.receive(); + Message message2 = 
consumer.receive(); + + BatchMessageIdImpl messageId = (BatchMessageIdImpl) message2.getMessageId(); + consumer.acknowledgeAsync(messageId, transaction).get(); + + Transaction transaction2 = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.DAYS) + .build() + .get(); + transaction.commit().get(); + + try { + consumer.acknowledgeAsync(messageId, transaction2).get(); + fail(); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof PulsarClientException.TransactionConflictException); + } + } + + @Test + public void testGetSubPatternTopicFilterTxnInternalTopic() throws Exception { + String topic = TopicName.get(TopicDomain.persistent.toString(), + NamespaceName.get(NAMESPACE1), "testGetSubPatternTopicFilterTxnInternalTopic").toString(); + + int partition = 3; + admin.topics().createPartitionedTopic(topic, partition); + + String subscriptionName = "sub"; + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false) + .topic(topic).create(); + + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .subscriptionName(subscriptionName) + .subscriptionType(SubscriptionType.Shared) + .topic(topic) + .subscribe(); + + for (int i = 0; i < partition; i++) { + producer.send("test"); + } + + // creat pending ack managedLedger + for (int i = 0; i < partition; i++) { + Transaction transaction = pulsarClient.newTransaction() + .withTransactionTimeout(5, TimeUnit.SECONDS) + .build() + .get(); + consumer.acknowledgeAsync(consumer.receive().getMessageId(), transaction); + transaction.commit().get(); + } + + consumer.close(); + + @Cleanup + Consumer patternConsumer = pulsarClient.newConsumer(Schema.STRING) + .subscriptionName("patternSub") + .subscriptionType(SubscriptionType.Shared) + .topicsPattern("persistent://" + NAMESPACE1 + "/.*") + .subscribe(); + + for (int i = 0; i < partition; i++) { + producer.send("test" + i); + } + + // can use pattern sub consume + for (int i = 0; i < partition; i++) { 
+ patternConsumer.acknowledgeAsync(patternConsumer.receive().getMessageId()); + } + patternConsumer.close(); + producer.close(); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/RestExceptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/RestExceptionTest.java index 1703f73983b28..c1938adfc5f86 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/RestExceptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/RestExceptionTest.java @@ -22,6 +22,7 @@ import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.Response.Status; +import org.apache.pulsar.common.policies.data.ErrorData; import org.testng.annotations.Test; /** @@ -54,7 +55,8 @@ public void testOtherException() { RestException testException = new RestException(otherException); assertEquals(Status.INTERNAL_SERVER_ERROR.getStatusCode(), testException.getResponse().getStatus()); - assertEquals(RestException.getExceptionData(otherException), testException.getResponse().getEntity()); + ErrorData errorData = (ErrorData)testException.getResponse().getEntity(); + assertEquals(RestException.getExceptionData(otherException), errorData.reason); } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/WebServiceTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/WebServiceTest.java index b8f3ac472c8fc..4173d5152ea35 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/WebServiceTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/WebServiceTest.java @@ -18,18 +18,21 @@ */ package org.apache.pulsar.broker.web; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; +import com.google.common.collect.Multimap; import 
com.google.common.collect.Sets; import com.google.common.io.CharStreams; import com.google.common.io.Closeables; import io.netty.handler.ssl.util.InsecureTrustManagerFactory; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; @@ -38,12 +41,7 @@ import java.security.PrivateKey; import java.security.SecureRandom; import java.security.cert.Certificate; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Optional; -import java.util.Set; +import java.util.*; import java.util.concurrent.CompletableFuture; import javax.net.ssl.HttpsURLConnection; @@ -59,6 +57,8 @@ import org.apache.pulsar.broker.PulsarService; import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.stats.PrometheusMetricsTest; +import org.apache.pulsar.broker.stats.prometheus.PrometheusMetricsGenerator; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminBuilder; import org.apache.pulsar.client.admin.PulsarAdminException.ConflictException; @@ -99,6 +99,42 @@ public class WebServiceTest { private static final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/certificate/client.crt"; private static final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/certificate/client.key"; + @Test + public void testWebExecutorMetrics() throws Exception { + setupEnv(true, "1.0", true, false, false, false, -1, false); + ByteArrayOutputStream statsOut = new ByteArrayOutputStream(); + PrometheusMetricsGenerator.generate(pulsar, false, false, false, statsOut); + String metricsStr = statsOut.toString(); + Multimap metrics = PrometheusMetricsTest.parseMetrics(metricsStr); + + Collection maxThreads = metrics.get("pulsar_web_executor_max_threads"); + Collection minThreads = metrics.get("pulsar_web_executor_min_threads"); + 
Collection activeThreads = metrics.get("pulsar_web_executor_active_threads"); + Collection idleThreads = metrics.get("pulsar_web_executor_idle_threads"); + Collection currentThreads = metrics.get("pulsar_web_executor_current_threads"); + + for (PrometheusMetricsTest.Metric metric : maxThreads) { + Assert.assertNotNull(metric.tags.get("cluster")); + Assert.assertTrue(metric.value > 0); + } + for (PrometheusMetricsTest.Metric metric : minThreads) { + Assert.assertNotNull(metric.tags.get("cluster")); + Assert.assertTrue(metric.value > 0); + } + for (PrometheusMetricsTest.Metric metric : activeThreads) { + Assert.assertNotNull(metric.tags.get("cluster")); + Assert.assertTrue(metric.value >= 0); + } + for (PrometheusMetricsTest.Metric metric : idleThreads) { + Assert.assertNotNull(metric.tags.get("cluster")); + Assert.assertTrue(metric.value >= 0); + } + for (PrometheusMetricsTest.Metric metric : currentThreads) { + Assert.assertNotNull(metric.tags.get("cluster")); + Assert.assertTrue(metric.value > 0); + } + } + /** * Test that the {@WebService} class properly passes the allowUnversionedClients value. We do this by setting * allowUnversionedClients to true, then making a request with no version, which should go through. 
@@ -389,7 +425,7 @@ private void setupEnv(boolean enableFilter, String minApiVersion, boolean allowU config.setHttpRequestsLimitEnabled(true); config.setHttpRequestsMaxPerSecond(rateLimit); } - pulsar = spy(new PulsarService(config)); + pulsar = spyWithClassAndConstructorArgs(PulsarService.class, config); // mock zk MockZooKeeper mockZooKeeper = MockedPulsarServiceBaseTest.createMockZooKeeper(); ZooKeeperClientFactory mockZooKeeperClientFactory = new ZooKeeperClientFactory() { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServletWithClassLoaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServletWithClassLoaderTest.java new file mode 100644 index 0000000000000..c875e8ac129cf --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/web/plugin/servlet/AdditionalServletWithClassLoaderTest.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.broker.web.plugin.servlet; + +import static org.mockito.ArgumentMatchers.same; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.common.configuration.PulsarConfiguration; +import org.apache.pulsar.common.nar.NarClassLoader; +import org.eclipse.jetty.servlet.ServletHolder; +import org.testng.annotations.Test; + + +/** + * Unit test {@link AdditionalServletWithClassLoader}. + */ +@Test(groups = "broker") +public class AdditionalServletWithClassLoaderTest { + + @Test + public void testWrapper() { + AdditionalServlet servlet = mock(AdditionalServlet.class); + NarClassLoader loader = mock(NarClassLoader.class); + AdditionalServletWithClassLoader wrapper = new AdditionalServletWithClassLoader(servlet, loader); + // test getBasePath + String basePath = "bathPath"; + when(servlet.getBasePath()).thenReturn(basePath); + assertEquals(basePath, wrapper.getBasePath()); + verify(servlet, times(1)).getBasePath(); + // test loadConfig + ServiceConfiguration conf = new ServiceConfiguration(); + wrapper.loadConfig(conf); + verify(servlet, times(1)).loadConfig(same(conf)); + // test getServlet + assertEquals(wrapper.getServlet(),servlet); + // test getServletHolder + ServletHolder servletHolder = new ServletHolder(); + when(servlet.getServletHolder()).thenReturn(servletHolder); + assertEquals(wrapper.getServletHolder(),servletHolder); + verify(servlet, times(1)).getServletHolder(); + } + + @Test + public void testClassLoaderSwitcher() throws Exception { + NarClassLoader narLoader = mock(NarClassLoader.class); + AdditionalServlet servlet = new AdditionalServlet() { + @Override + public void loadConfig(PulsarConfiguration pulsarConfiguration) { + 
assertEquals(Thread.currentThread().getContextClassLoader(), narLoader); + } + + @Override + public String getBasePath() { + assertEquals(Thread.currentThread().getContextClassLoader(), narLoader); + return "base-path"; + } + + @Override + public ServletHolder getServletHolder() { + assertEquals(Thread.currentThread().getContextClassLoader(), narLoader); + return null; + } + + @Override + public void close() { + assertEquals(Thread.currentThread().getContextClassLoader(), narLoader); + } + }; + + AdditionalServletWithClassLoader additionalServletWithClassLoader = + new AdditionalServletWithClassLoader(servlet, narLoader); + ClassLoader curClassLoader = Thread.currentThread().getContextClassLoader(); + // test class loader + assertEquals(additionalServletWithClassLoader.getClassLoader(), narLoader); + // test getBasePath + assertEquals(additionalServletWithClassLoader.getBasePath(), "base-path"); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + // test loadConfig + ServiceConfiguration conf = new ServiceConfiguration(); + additionalServletWithClassLoader.loadConfig(conf); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + // test getServletHolder + assertNull(additionalServletWithClassLoader.getServletHolder()); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + // test getServlet + assertEquals(additionalServletWithClassLoader.getServlet(), servlet); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + // test close + additionalServletWithClassLoader.close(); + assertEquals(Thread.currentThread().getContextClassLoader(), curClassLoader); + + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/zookeeper/MultiBrokerMetadataConsistencyTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/zookeeper/MultiBrokerMetadataConsistencyTest.java new file mode 100644 index 0000000000000..15849a6773a1b --- /dev/null +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/broker/zookeeper/MultiBrokerMetadataConsistencyTest.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.broker.zookeeper; + +import static org.testng.Assert.assertTrue; +import java.util.List; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.MultiBrokerBaseTest; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.metadata.TestZKServer; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.MetadataStoreException; +import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; +import org.testng.annotations.Test; + +@Slf4j +@Test(groups = "broker") +public class MultiBrokerMetadataConsistencyTest extends MultiBrokerBaseTest { + @Override + protected int numberOfAdditionalBrokers() { + return 2; + } + + TestZKServer testZKServer; + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + testZKServer = new TestZKServer(); + } + + @Override + protected void onCleanup() { + super.onCleanup(); + if (testZKServer != null) { + try { + 
testZKServer.close(); + } catch (Exception e) { + log.error("Error in stopping ZK server", e); + } + } + } + + @Override + protected MetadataStoreExtended createLocalMetadataStore() throws MetadataStoreException { + return MetadataStoreExtended.create(testZKServer.getConnectionString(), MetadataStoreConfig.builder().build()); + } + + @Override + protected MetadataStoreExtended createConfigurationMetadataStore() throws MetadataStoreException { + return MetadataStoreExtended.create(testZKServer.getConnectionString(), MetadataStoreConfig.builder().build()); + } + + @Test + public void newTopicShouldBeInTopicsList() throws PulsarAdminException { + List admins = getAllAdmins(); + PulsarAdmin first = admins.get(0); + PulsarAdmin second = admins.get(1); + List cacheMiss = second.topics().getList("public/default"); + assertTrue(cacheMiss.isEmpty()); + first.topics().createNonPartitionedTopic("persistent://public/default/my-topic"); + List topics = second.topics().getList("public/default"); + assertTrue(topics.contains("persistent://public/default/my-topic")); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticatedProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticatedProducerConsumerTest.java index 6d76ce8cbfe8e..e0cc980991ef7 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticatedProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticatedProducerConsumerTest.java @@ -19,7 +19,10 @@ package org.apache.pulsar.client.api; import static org.mockito.Mockito.spy; - +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; +import com.google.common.collect.Sets; +import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; @@ -27,20 +30,20 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; - import 
javax.ws.rs.InternalServerErrorException; - import org.apache.pulsar.broker.authentication.AuthenticationProviderBasic; import org.apache.pulsar.broker.authentication.AuthenticationProviderTls; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.impl.auth.AuthenticationBasic; import org.apache.pulsar.client.impl.auth.AuthenticationTls; +import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.AuthAction; import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.ClusterDataImpl; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.zookeeper.KeeperException.Code; +import org.awaitility.Awaitility; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.Assert; @@ -49,8 +52,6 @@ import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import com.google.common.collect.Sets; - @Test(groups = "broker-api") public class AuthenticatedProducerConsumerTest extends ProducerConsumerBase { private static final Logger log = LoggerFactory.getLogger(AuthenticatedProducerConsumerTest.class); @@ -79,6 +80,7 @@ protected void setup() throws Exception { conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); conf.setTlsAllowInsecureConnection(true); + conf.setTopicLevelPoliciesEnabled(false); Set superUserRoles = new HashSet<>(); superUserRoles.add("localhost"); @@ -87,9 +89,10 @@ protected void setup() throws Exception { superUserRoles.add("admin"); conf.setSuperUserRoles(superUserRoles); + conf.setBrokerClientTlsEnabled(true); conf.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); conf.setBrokerClientAuthenticationParameters( - "tlsCertFile:" + TLS_CLIENT_CERT_FILE_PATH + "," + "tlsKeyFile:" + 
TLS_SERVER_KEY_FILE_PATH); + "tlsCertFile:" + TLS_CLIENT_CERT_FILE_PATH + "," + "tlsKeyFile:" + TLS_CLIENT_KEY_FILE_PATH); Set providers = new HashSet<>(); providers.add(AuthenticationProviderTls.class.getName()); @@ -337,4 +340,68 @@ public void testInternalServerExceptionOnLookup() throws Exception { mockZooKeeperGlobal.unsetAlwaysFail(); } + @Test + public void testDeleteAuthenticationPoliciesOfTopic() throws Exception { + Map authParams = new HashMap<>(); + authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH); + authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH); + Authentication authTls = new AuthenticationTls(); + authTls.configure(authParams); + internalSetup(authTls); + + admin.clusters().createCluster("test", ClusterData.builder().build()); + admin.tenants().createTenant("p1", + new TenantInfoImpl(Collections.emptySet(), new HashSet<>(admin.clusters().getClusters()))); + admin.namespaces().createNamespace("p1/ns1"); + + // test for non-partitioned topic + String topic = "persistent://p1/ns1/topic"; + admin.topics().createNonPartitionedTopic(topic); + admin.topics().grantPermission(topic, "test-user", EnumSet.of(AuthAction.consume)); + + Awaitility.await().untilAsserted(() -> { + assertTrue(pulsar.getPulsarResources().getNamespaceResources().getPolicies(NamespaceName.get("p1/ns1")) + .get().auth_policies.getTopicAuthentication().containsKey(topic)); + }); + + admin.topics().delete(topic); + + Awaitility.await().untilAsserted(() -> { + assertFalse(pulsar.getPulsarResources().getNamespaceResources().getPolicies(NamespaceName.get("p1/ns1")) + .get().auth_policies.getTopicAuthentication().containsKey(topic)); + }); + + // test for partitioned topic + String partitionedTopic = "persistent://p1/ns1/partitioned-topic"; + int numPartitions = 5; + + admin.topics().createPartitionedTopic(partitionedTopic, numPartitions); + admin.topics() + .grantPermission(partitionedTopic, "test-user", EnumSet.of(AuthAction.consume)); + + Awaitility.await().untilAsserted(() 
-> { + assertTrue(pulsar.getPulsarResources().getNamespaceResources().getPolicies(NamespaceName.get("p1/ns1")) + .get().auth_policies.getTopicAuthentication().containsKey(partitionedTopic)); + for (int i = 0; i < numPartitions; i++) { + assertTrue(pulsar.getPulsarResources().getNamespaceResources().getPolicies(NamespaceName.get("p1/ns1")) + .get().auth_policies.getTopicAuthentication() + .containsKey(TopicName.get(partitionedTopic).getPartition(i).toString())); + } + }); + + admin.topics().deletePartitionedTopic("persistent://p1/ns1/partitioned-topic"); + Awaitility.await().untilAsserted(() -> { + assertFalse(pulsar.getPulsarResources().getNamespaceResources().getPolicies(NamespaceName.get("p1/ns1")) + .get().auth_policies.getTopicAuthentication().containsKey(partitionedTopic)); + for (int i = 0; i < numPartitions; i++) { + assertFalse(pulsar.getPulsarResources().getNamespaceResources().getPolicies(NamespaceName.get("p1/ns1")) + .get().auth_policies.getTopicAuthentication() + .containsKey(TopicName.get(partitionedTopic).getPartition(i).toString())); + } + }); + + admin.namespaces().deleteNamespace("p1/ns1"); + admin.tenants().deleteTenant("p1"); + admin.clusters().deleteCluster("test"); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticationTlsHostnameVerificationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticationTlsHostnameVerificationTest.java index bb8a02143e5e7..157b35a8aa9ec 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticationTlsHostnameVerificationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthenticationTlsHostnameVerificationTest.java @@ -18,8 +18,7 @@ */ package org.apache.pulsar.client.api; -import static org.mockito.Mockito.spy; - +import com.google.common.collect.Sets; import java.lang.reflect.Method; import java.util.HashMap; import java.util.HashSet; @@ -27,16 +26,12 @@ import java.util.Optional; import java.util.Set; import 
java.util.concurrent.TimeUnit; - import org.apache.pulsar.broker.authentication.AuthenticationProviderBasic; import org.apache.pulsar.broker.authentication.AuthenticationProviderTls; -import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.impl.auth.AuthenticationTls; -import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.tls.PublicSuffixMatcher; import org.apache.pulsar.common.tls.TlsHostnameVerifier; import org.apache.pulsar.common.policies.data.ClusterDataImpl; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.Assert; @@ -44,8 +39,6 @@ import org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import com.google.common.collect.Sets; - @Test(groups = "broker-api") public class AuthenticationTlsHostnameVerificationTest extends ProducerConsumerBase { private static final Logger log = LoggerFactory.getLogger(AuthenticationTlsHostnameVerificationTest.class); @@ -65,8 +58,13 @@ public class AuthenticationTlsHostnameVerificationTest extends ProducerConsumerB private final String BASIC_CONF_FILE_PATH = "./src/test/resources/authentication/basic/.htpasswd"; private boolean hostnameVerificationEnabled = true; + private String clientTrustCertFilePath = TLS_TRUST_CERT_FILE_PATH; protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + super.stopBroker(); + if (methodName.equals("testAnonymousSyncProducerAndConsumer")) { conf.setAnonymousUserRole("anonymousUser"); } @@ -74,7 +72,7 @@ protected void setup() throws Exception { conf.setAuthenticationEnabled(true); conf.setAuthorizationEnabled(true); - conf.setTlsAllowInsecureConnection(true); + conf.setTlsAllowInsecureConnection(false); Set superUserRoles = new HashSet<>(); superUserRoles.add("localhost"); @@ -96,7 +94,7 @@ protected void setup() throws Exception { conf.setClusterName("test"); 
conf.setNumExecutorThreadPoolSize(5); - super.init(); + startBroker(); setupClient(); } @@ -109,22 +107,11 @@ protected void setupClient() throws Exception { Authentication authTls = new AuthenticationTls(); authTls.configure(authParams); - admin = spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrlTls.toString()) - .tlsTrustCertsFilePath(TLS_MIM_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(true) - .authentication(authTls).build()); replacePulsarClient(PulsarClient.builder() .serviceUrl(pulsar.getBrokerServiceUrlTls()) .statsInterval(0, TimeUnit.SECONDS) - .tlsTrustCertsFilePath(TLS_MIM_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(true) + .tlsTrustCertsFilePath(clientTrustCertFilePath) .authentication(authTls).enableTls(true).enableTlsHostnameVerification(hostnameVerificationEnabled)); - - admin.clusters().createCluster("test", ClusterData.builder() - .serviceUrl(brokerUrl.toString()) - .build()); - - admin.tenants().createTenant("my-property", - new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("test"))); - admin.namespaces().createNamespace("my-property/my-ns", Sets.newHashSet("test")); } @AfterMethod(alwaysRun = true) @@ -157,10 +144,11 @@ public void testTlsSyncProducerAndConsumerWithInvalidBrokerHost(boolean hostname log.info("-- Starting {} test --", methodName); this.hostnameVerificationEnabled = hostnameVerificationEnabled; + clientTrustCertFilePath = TLS_MIM_TRUST_CERT_FILE_PATH; // setup broker cert which has CN = "pulsar" different than broker's hostname="localhost" conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); - conf.setTlsTrustCertsFilePath(TLS_MIM_TRUST_CERT_FILE_PATH); + conf.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); conf.setTlsCertificateFilePath(TLS_MIM_SERVER_CERT_FILE_PATH); conf.setTlsKeyFilePath(TLS_MIM_SERVER_KEY_FILE_PATH); conf.setBrokerClientAuthenticationParameters( diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthorizationProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthorizationProducerConsumerTest.java index 0ad79ff816cef..dcfb16c92de61 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthorizationProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/AuthorizationProducerConsumerTest.java @@ -20,15 +20,21 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank; import static org.mockito.Mockito.spy; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; +import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import java.io.IOException; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; +import java.util.TreeMap; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -52,6 +58,7 @@ import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.ClusterDataImpl; import org.apache.pulsar.common.policies.data.NamespaceOperation; +import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; import org.apache.pulsar.common.policies.data.TenantInfo; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TenantOperation; @@ -175,6 +182,7 @@ public void testSubscriberPermission() throws Exception { final String tenantRole = "tenant-role"; final String subscriptionRole = "sub1-role"; final String subscriptionName = "sub1"; + final String subscriptionName2 = "sub2"; final String namespace = "my-property/my-ns-sub-auth"; final String 
topicName = "persistent://" + namespace + "/my-topic"; Authentication adminAuthentication = new ClientAuthentication("superUser"); @@ -202,7 +210,26 @@ public void testSubscriberPermission() throws Exception { superAdmin.tenants().createTenant("my-property", new TenantInfoImpl(Sets.newHashSet(tenantRole), Sets.newHashSet("test"))); superAdmin.namespaces().createNamespace(namespace, Sets.newHashSet("test")); - tenantAdmin.namespaces().grantPermissionOnNamespace(namespace, subscriptionRole, + assertNull(superAdmin.namespaces().getPublishRate(namespace)); + + // subscriptionRole doesn't have topic-level authorization, so it will fail to get topic stats-internal info + try { + sub1Admin.topics().getInternalStats(topicName, true); + fail("should have failed with authorization exception"); + } catch (Exception e) { + assertTrue(e.getMessage().startsWith( + "Unauthorized to validateTopicOperation for operation [GET_STATS]")); + } + try { + sub1Admin.topics().getBacklogSizeByMessageId(topicName, MessageId.earliest); + fail("should have failed with authorization exception"); + } catch (Exception e) { + assertTrue(e.getMessage().startsWith( + "Unauthorized to validateTopicOperation for operation")); + } + + // grant topic consume authorization to the subscriptionRole + tenantAdmin.topics().grantPermission(topicName, subscriptionRole, Collections.singleton(AuthAction.consume)); replacePulsarClient(PulsarClient.builder() @@ -212,7 +239,19 @@ public void testSubscriberPermission() throws Exception { // (1) Create subscription name Consumer consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName) .subscribe(); + Consumer consumer2 = pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName2) + .subscribe(); consumer.close(); + consumer2.close(); + + List subscriptions = sub1Admin.topics().getSubscriptions(topicName); + assertEquals(subscriptions.size(), 2); + + // now, subscriptionRole have consume authorization on topic, so 
it will successfully get topic internal stats + PersistentTopicInternalStats internalStats = sub1Admin.topics().getInternalStats(topicName, true); + assertNotNull(internalStats); + Long backlogSize = sub1Admin.topics().getBacklogSizeByMessageId(topicName, MessageId.earliest); + assertEquals(backlogSize.longValue(), 0); // verify tenant is able to perform all subscription-admin api tenantAdmin.topics().skipAllMessages(topicName, subscriptionName); @@ -228,10 +267,24 @@ public void testSubscriberPermission() throws Exception { tenantAdmin.topics().resetCursor(topicName, subscriptionName, 10); tenantAdmin.topics().resetCursor(topicName, subscriptionName, MessageId.earliest); + // subscriptionRole doesn't have namespace-level authorization, so it will fail to unsubscribe namespace + try { + sub1Admin.namespaces().unsubscribeNamespace(namespace, subscriptionName2); + fail("should have failed with authorization exception"); + } catch (Exception e) { + assertTrue(e.getMessage().startsWith( + "Unauthorized to validateNamespaceOperation for operation [UNSUBSCRIBE]")); + } + // grant namespace-level authorization to the subscriptionRole tenantAdmin.namespaces().grantPermissionOnNamespace(namespace, subscriptionRole, Collections.singleton(AuthAction.consume)); + // now, subscriptionRole have consume authorization on namespace, so it will successfully unsubscribe namespace + sub1Admin.namespaces().unsubscribeNamespaceBundle(namespace, "0x00000000_0xffffffff", subscriptionName2); + subscriptions = sub1Admin.topics().getSubscriptions(topicName); + assertEquals(subscriptions.size(), 1); + // subscriptionRole has namespace-level authorization sub1Admin.topics().resetCursor(topicName, subscriptionName, 10); @@ -239,6 +292,9 @@ public void testSubscriberPermission() throws Exception { String otherPrincipal = "Principal-1-to-access-sub"; tenantAdmin.namespaces().grantPermissionOnSubscription(namespace, subscriptionName, Collections.singleton(otherPrincipal)); + TreeMap> 
permissionOnSubscription = new TreeMap<>(); + permissionOnSubscription.put(subscriptionName, Collections.singleton(otherPrincipal)); + Assert.assertEquals(tenantAdmin.namespaces().getPermissionOnSubscription(namespace), permissionOnSubscription); // now, subscriptionRole doesn't have subscription level access so, it will fail to access subscription try { @@ -259,6 +315,9 @@ public void testSubscriberPermission() throws Exception { // now, grant subscription-access to subscriptionRole as well superAdmin.namespaces().grantPermissionOnSubscription(namespace, subscriptionName, Sets.newHashSet(otherPrincipal, subscriptionRole)); + TreeMap> permissionOnSubscription1 = new TreeMap<>(); + permissionOnSubscription1.put(subscriptionName, Sets.newHashSet(otherPrincipal, subscriptionRole)); + Assert.assertEquals(tenantAdmin.namespaces().getPermissionOnSubscription(namespace), permissionOnSubscription1); sub1Admin.topics().skipAllMessages(topicName, subscriptionName); sub1Admin.topics().skipMessages(topicName, subscriptionName, 1); @@ -283,6 +342,104 @@ public void testSubscriberPermission() throws Exception { log.info("-- Exiting {} test --", methodName); } + @Test + public void testClearBacklogPermission() throws Exception { + log.info("-- Starting {} test --", methodName); + + conf.setAuthorizationProvider(PulsarAuthorizationProvider.class.getName()); + setup(); + + final String tenantRole = "tenant-role"; + final String subscriptionRole = "sub-role"; + final String subscriptionName = "sub1"; + final String namespace = "my-property/my-ns-sub-auth"; + final String topicName = "persistent://" + namespace + "/my-topic"; + Authentication adminAuthentication = new ClientAuthentication("superUser"); + + clientAuthProviderSupportedRoles.add(subscriptionRole); + + @Cleanup + PulsarAdmin superAdmin = spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrl.toString()) + .authentication(adminAuthentication).build()); + + Authentication tenantAdminAuthentication = new 
ClientAuthentication(tenantRole); + @Cleanup + PulsarAdmin tenantAdmin = spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrl.toString()) + .authentication(tenantAdminAuthentication).build()); + + Authentication subAdminAuthentication = new ClientAuthentication(subscriptionRole); + @Cleanup + PulsarAdmin sub1Admin = spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrl.toString()) + .authentication(subAdminAuthentication).build()); + + superAdmin.clusters().createCluster("test", + ClusterData.builder().serviceUrl(brokerUrl.toString()).build()); + superAdmin.tenants().createTenant("my-property", + new TenantInfoImpl(Sets.newHashSet(tenantRole), Sets.newHashSet("test"))); + superAdmin.namespaces().createNamespace(namespace, Sets.newHashSet("test")); + superAdmin.topics().createPartitionedTopic(topicName, 1); + assertEquals(tenantAdmin.topics().getPartitionedTopicList(namespace), + Lists.newArrayList(topicName)); + + // grant topic consume&produce authorization to the subscriptionRole + superAdmin.topics().grantPermission(topicName, subscriptionRole, + Sets.newHashSet(AuthAction.produce, AuthAction.consume)); + replacePulsarClient(PulsarClient.builder() + .serviceUrl(pulsar.getBrokerServiceUrl()) + .authentication(subAdminAuthentication)); + + @Cleanup + Producer batchProducer = pulsarClient.newProducer().topic(topicName) + .enableBatching(false) + .create(); + + @Cleanup + Consumer consumer = pulsarClient.newConsumer().topic(topicName) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionName(subscriptionName) + .subscribe(); + + CompletableFuture completableFuture = new CompletableFuture<>(); + for (int i = 0; i < 10; i++) { + completableFuture = batchProducer.sendAsync("a".getBytes()); + } + completableFuture.get(); + assertEquals(sub1Admin.topics().getStats(topicName + "-partition-0").getSubscriptions() + .get(subscriptionName).getMsgBacklog(), 10); + + // subscriptionRole doesn't have namespace-level authorization, so it will fail to 
clear backlog + try { + sub1Admin.topics().getPartitionedTopicList(namespace); + fail("should have failed with authorization exception"); + } catch (Exception e) { + assertTrue(e.getMessage().startsWith( + "Unauthorized to validateNamespaceOperation for operation [GET_TOPICS]")); + } + try { + sub1Admin.namespaces().clearNamespaceBundleBacklog(namespace, "0x00000000_0xffffffff"); + fail("should have failed with authorization exception"); + } catch (Exception e) { + assertTrue(e.getMessage().startsWith( + "Unauthorized to validateNamespaceOperation for operation [CLEAR_BACKLOG]")); + } + + superAdmin.namespaces().grantPermissionOnNamespace(namespace, subscriptionRole, + Sets.newHashSet(AuthAction.consume)); + // now, subscriptionRole have consume authorization on namespace, so it will successfully clear backlog + assertEquals(sub1Admin.topics().getPartitionedTopicList(namespace), + Lists.newArrayList(topicName)); + sub1Admin.namespaces().clearNamespaceBundleBacklog(namespace, "0x00000000_0xffffffff"); + assertEquals(sub1Admin.topics().getStats(topicName + "-partition-0").getSubscriptions() + .get(subscriptionName).getMsgBacklog(), 0); + + superAdmin.namespaces().revokePermissionsOnNamespace(namespace, subscriptionRole); + superAdmin.namespaces().grantPermissionOnNamespace(namespace, subscriptionRole, + Sets.newHashSet(AuthAction.produce)); + assertEquals(sub1Admin.topics().getPartitionedTopicList(namespace), + Lists.newArrayList(topicName)); + log.info("-- Exiting {} test --", methodName); + } + @Test public void testSubscriptionPrefixAuthorization() throws Exception { log.info("-- Starting {} test --", methodName); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/BrokerServiceLookupTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/BrokerServiceLookupTest.java index 251ccd3eda3bd..670d184e5fc36 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/BrokerServiceLookupTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/BrokerServiceLookupTest.java @@ -654,15 +654,18 @@ public void testModularLoadManagerSplitBundle() throws Exception { conf2.setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); conf2.setZookeeperServers("localhost:2181"); conf2.setConfigurationStoreServers("localhost:3181"); - - @Cleanup - PulsarService pulsar2 = startBroker(conf2); + conf2.setLoadBalancerAutoBundleSplitEnabled(true); + conf2.setLoadBalancerAutoUnloadSplitBundlesEnabled(true); + conf2.setLoadBalancerNamespaceBundleMaxTopics(1); // configure broker-1 with ModularLoadManager stopBroker(); conf.setLoadManagerClassName(ModularLoadManagerImpl.class.getName()); startBroker(); + @Cleanup + PulsarService pulsar2 = startBroker(conf2); + pulsar.getLoadManager().get().writeLoadReportOnZookeeper(); pulsar2.getLoadManager().get().writeLoadReportOnZookeeper(); @@ -732,9 +735,6 @@ public void testModularLoadManagerSplitBundle() throws Exception { .getLoadManager().get()).getLoadManager(); updateAllMethod.invoke(loadManager); - conf2.setLoadBalancerAutoBundleSplitEnabled(true); - conf2.setLoadBalancerAutoUnloadSplitBundlesEnabled(true); - conf2.setLoadBalancerNamespaceBundleMaxTopics(1); loadManager.checkNamespaceBundleSplit(); // (6) Broker-2 should get the watch and update bundle cache @@ -882,6 +882,7 @@ public void onThrowable(Throwable t) { private AsyncHttpClient getHttpClient(String version) { DefaultAsyncHttpClientConfig.Builder confBuilder = new DefaultAsyncHttpClientConfig.Builder(); + confBuilder.setUseProxyProperties(true); confBuilder.setFollowRedirect(true); confBuilder.setUserAgent(version); confBuilder.setKeepAliveStrategy(new DefaultKeepAliveStrategy() { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientAuthenticationTlsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientAuthenticationTlsTest.java new file mode 100644 index 0000000000000..289a7a6797d14 --- /dev/null +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientAuthenticationTlsTest.java @@ -0,0 +1,183 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.client.api; + +import static org.testng.Assert.assertThrows; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.expectThrows; +import java.util.HashSet; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import lombok.Cleanup; +import org.apache.pulsar.broker.authentication.AuthenticationProviderTls; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminBuilder; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.impl.auth.AuthenticationTls; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Test(groups = "broker-api") +public class ClientAuthenticationTlsTest extends ProducerConsumerBase { + private final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem"; + private final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem"; 
+ private final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem"; + + private final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem"; + private final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem"; + + private final Authentication authenticationTls = + new AuthenticationTls(TLS_CLIENT_CERT_FILE_PATH, TLS_CLIENT_KEY_FILE_PATH); + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + + conf.setClusterName(configClusterName); + + Set providers = new HashSet<>(); + providers.add(AuthenticationProviderTls.class.getName()); + conf.setAuthenticationProviders(providers); + + conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH); + conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH); + conf.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); + + conf.setTlsAllowInsecureConnection(false); + + conf.setBrokerClientTlsEnabled(true); + conf.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); + conf.setBrokerClientAuthenticationParameters( + "tlsCertFile:" + TLS_CLIENT_CERT_FILE_PATH + "," + "tlsKeyFile:" + TLS_CLIENT_KEY_FILE_PATH); + conf.setBrokerClientTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); + } + + @BeforeClass(alwaysRun = true) + @Override + protected void setup() throws Exception { + super.internalSetup(); + setupDefaultTenantAndNamespace(); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Override + protected void customizeNewPulsarAdminBuilder(PulsarAdminBuilder pulsarAdminBuilder) { + super.customizeNewPulsarAdminBuilder(pulsarAdminBuilder); + pulsarAdminBuilder.authentication(authenticationTls); + } + + @Test + public void testAdminWithTrustCert() throws PulsarClientException, PulsarAdminException { + @Cleanup + PulsarAdmin pulsarAdmin = 
PulsarAdmin.builder().serviceHttpUrl(getPulsar().getWebServiceAddressTls()) + .sslProvider("JDK") + .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) + .build(); + pulsarAdmin.clusters().getClusters(); + } + + @Test + public void testAdminWithFull() throws PulsarClientException, PulsarAdminException { + @Cleanup + PulsarAdmin pulsarAdmin = PulsarAdmin.builder().serviceHttpUrl(getPulsar().getWebServiceAddressTls()) + .sslProvider("JDK") + .authentication(authenticationTls) + .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) + .build(); + pulsarAdmin.clusters().getClusters(); + } + + @Test + public void testAdminWithCertAndKey() throws PulsarClientException, PulsarAdminException { + @Cleanup + PulsarAdmin pulsarAdmin = PulsarAdmin.builder().serviceHttpUrl(getPulsar().getWebServiceAddressTls()) + .sslProvider("JDK") + .authentication(authenticationTls) + .build(); + PulsarAdminException adminException = + expectThrows(PulsarAdminException.class, () -> pulsarAdmin.clusters().getClusters()); + assertTrue(adminException.getMessage().contains("PKIX path")); + } + + @Test + public void testAdminWithoutTls() throws PulsarClientException, PulsarAdminException { + @Cleanup + PulsarAdmin pulsarAdmin = PulsarAdmin.builder().serviceHttpUrl(getPulsar().getWebServiceAddressTls()) + .sslProvider("JDK") + .build(); + PulsarAdminException adminException = + expectThrows(PulsarAdminException.class, () -> pulsarAdmin.clusters().getClusters()); + assertTrue(adminException.getMessage().contains("PKIX path")); + } + + @Test + public void testClientWithTrustCert() throws PulsarClientException, PulsarAdminException { + @Cleanup + PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(getPulsar().getBrokerServiceUrlTls()) + .sslProvider("JDK") + .operationTimeout(3, TimeUnit.SECONDS) + .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) + .build(); + @Cleanup + Producer ignored = pulsarClient.newProducer().topic(UUID.randomUUID().toString()).create(); + } + + @Test + public void 
testClientWithFull() throws PulsarClientException, PulsarAdminException { + @Cleanup + PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(getPulsar().getBrokerServiceUrlTls()) + .sslProvider("JDK") + .operationTimeout(3, TimeUnit.SECONDS) + .authentication(authenticationTls) + .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) + .build(); + @Cleanup + Producer ignored = pulsarClient.newProducer().topic(UUID.randomUUID().toString()).create(); + } + + @Test + public void testClientWithCertAndKey() throws PulsarClientException { + @Cleanup + PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(getPulsar().getBrokerServiceUrlTls()) + .sslProvider("JDK") + .operationTimeout(3, TimeUnit.SECONDS) + .authentication(authenticationTls) + .build(); + assertThrows(PulsarClientException.class, + () -> pulsarClient.newProducer().topic(UUID.randomUUID().toString()).create()); + } + + @Test + public void testClientWithoutTls() throws PulsarClientException, PulsarAdminException { + @Cleanup + PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(getPulsar().getBrokerServiceUrlTls()) + .sslProvider("JDK") + .operationTimeout(3, TimeUnit.SECONDS) + .build(); + assertThrows(PulsarClientException.class, + () -> pulsarClient.newProducer().topic(UUID.randomUUID().toString()).create()); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientDeduplicationFailureTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientDeduplicationFailureTest.java index a2b4b5c3dbf53..2ddb9e8c8a35a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientDeduplicationFailureTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientDeduplicationFailureTest.java @@ -81,7 +81,7 @@ void setup(Method method) throws Exception { bkEnsemble = new LocalBookkeeperEnsemble(3, 0, () -> 0); bkEnsemble.start(); - config = spy(new ServiceConfiguration()); + config = spy(ServiceConfiguration.class); 
config.setClusterName("use"); config.setWebServicePort(Optional.of(0)); config.setZookeeperServers("127.0.0.1" + ":" + bkEnsemble.getZookeeperPort()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientDeduplicationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientDeduplicationTest.java index 304bb6eaaa02d..52017444a2b76 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientDeduplicationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientDeduplicationTest.java @@ -20,19 +20,37 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; - +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.client.impl.BatchMessageIdImpl; +import org.apache.pulsar.common.util.FutureUtil; import org.awaitility.Awaitility; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; +@Slf4j @Test(groups = "flaky") public class ClientDeduplicationTest extends ProducerConsumerBase { + @DataProvider + public static Object[][] batchingTypes() { + return new Object[][] { + { BatcherBuilder.DEFAULT }, + { BatcherBuilder.KEY_BASED } + }; + } + @BeforeClass @Override protected void setup() throws Exception { @@ -46,7 +64,7 @@ protected void cleanup() throws Exception { super.internalCleanup(); } - @Test + @Test(priority = -1) public void testNamespaceDeduplicationApi() throws Exception { final String namespace = "my-property/my-ns"; assertNull(admin.namespaces().getDeduplicationStatus(namespace)); @@ -174,9 +192,10 @@ 
public void testProducerDeduplication() throws Exception { producer.close(); } - @Test(timeOut = 30000) - public void testProducerDeduplicationWithDiscontinuousSequenceId() throws Exception { - String topic = "persistent://my-property/my-ns/testProducerDeduplicationWithDiscontinuousSequenceId"; + @Test(timeOut = 30000, dataProvider = "batchingTypes") + public void testProducerDeduplicationWithDiscontinuousSequenceId(BatcherBuilder batcherBuilder) throws Exception { + String topic = "persistent://my-property/my-ns/testProducerDeduplicationWithDiscontinuousSequenceId-" + + System.currentTimeMillis(); admin.namespaces().setDeduplicationStatus("my-property/my-ns", true); // Set infinite timeout @@ -185,7 +204,9 @@ public void testProducerDeduplicationWithDiscontinuousSequenceId() throws Except .topic(topic) .producerName("my-producer-name") .enableBatching(true) + .batcherBuilder(batcherBuilder) .batchingMaxMessages(10) + .batchingMaxPublishDelay(1L, TimeUnit.HOURS) .sendTimeout(0, TimeUnit.SECONDS); Producer producer = producerBuilder.create(); @@ -208,7 +229,8 @@ public void testProducerDeduplicationWithDiscontinuousSequenceId() throws Except producer.flush(); for (int i = 0; i < 4; i++) { - Message msg = consumer.receive(); + Message msg = consumer.receive(3, TimeUnit.SECONDS); + assertNotNull(msg); assertEquals(new String(msg.getData()), "my-message-" + i); consumer.acknowledge(msg); } @@ -284,4 +306,68 @@ public void testProducerDeduplicationNonBatchAsync() throws Exception { producer.close(); } + + @Test(timeOut = 30000) + public void testKeyBasedBatchingOrder() throws Exception { + final String topic = "persistent://my-property/my-ns/test-key-based-batching-order"; + admin.namespaces().setDeduplicationStatus("my-property/my-ns", true); + + final Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("sub") + .subscribe(); + final Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + 
.batcherBuilder(BatcherBuilder.KEY_BASED) + .batchingMaxMessages(100) + .batchingMaxBytes(1024 * 1024 * 5) + .batchingMaxPublishDelay(1, TimeUnit.HOURS) + .create(); + // | key | sequence id list | + // | :-- | :--------------- | + // | A | 0, 3, 4 | + // | B | 1, 2 | + final List> sendFutures = new ArrayList<>(); + sendFutures.add(producer.newMessage().key("A").value("msg-0").sequenceId(0L).sendAsync()); + sendFutures.add(producer.newMessage().key("B").value("msg-1").sequenceId(1L).sendAsync()); + sendFutures.add(producer.newMessage().key("B").value("msg-2").sequenceId(2L).sendAsync()); + sendFutures.add(producer.newMessage().key("A").value("msg-3").sequenceId(3L).sendAsync()); + sendFutures.add(producer.newMessage().key("A").value("msg-4").sequenceId(4L).sendAsync()); + // The message order is expected to be [1, 2, 0, 3, 4]. The sequence ids are not ordered strictly, but: + // 1. The sequence ids for a given key are ordered. + // 2. The highest sequence ids of batches are ordered. + producer.flush(); + + FutureUtil.waitForAll(sendFutures); + final List sendMessageIds = sendFutures.stream().map(CompletableFuture::join) + .collect(Collectors.toList()); + for (int i = 0; i < sendMessageIds.size(); i++) { + log.info("Send msg-{} to {}", i, sendMessageIds.get(i)); + } + + final List sequenceIdList = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + final Message msg = consumer.receive(3, TimeUnit.SECONDS); + if (msg == null) { + break; + } + log.info("Received {}, key: {}, seq id: {}, msg id: {}", + msg.getValue(), msg.getKey(), msg.getSequenceId(), msg.getMessageId()); + assertNotNull(msg); + sequenceIdList.add(msg.getSequenceId()); + } + assertEquals(sequenceIdList, Arrays.asList(1L, 2L, 0L, 3L, 4L)); + + for (int i = 0; i < 5; i++) { + // Currently sending a duplicated message won't throw an exception. Instead, an invalid result is returned. 
+ final MessageId messageId = producer.newMessage().value("msg").sequenceId(i).send(); + assertTrue(messageId instanceof BatchMessageIdImpl); + final BatchMessageIdImpl messageIdImpl = (BatchMessageIdImpl) messageId; + assertEquals(messageIdImpl.getLedgerId(), -1L); + assertEquals(messageIdImpl.getEntryId(), -1L); + } + + consumer.close(); + producer.close(); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientErrorsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientErrorsTest.java index f66b19808f564..d98f0d57da096 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientErrorsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ClientErrorsTest.java @@ -39,6 +39,7 @@ import org.apache.pulsar.common.api.proto.ServerError; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.schema.SchemaVersion; +import org.awaitility.Awaitility; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; @@ -170,6 +171,7 @@ private void producerCreateFailAfterRetryTimeout(String topic) throws Exception PulsarClient client = PulsarClient.builder().serviceUrl(mockBrokerService.getBrokerAddress()) .operationTimeout(1, TimeUnit.SECONDS).build(); final AtomicInteger counter = new AtomicInteger(0); + final AtomicInteger closeProducerCounter = new AtomicInteger(0); mockBrokerService.setHandleProducer((ctx, producer) -> { if (counter.incrementAndGet() == 2) { @@ -182,6 +184,10 @@ private void producerCreateFailAfterRetryTimeout(String topic) throws Exception ctx.writeAndFlush(Commands.newError(producer.getRequestId(), ServerError.ServiceNotReady, "msg")); }); + mockBrokerService.setHandleCloseProducer((ctx, closeProducer) -> { + closeProducerCounter.incrementAndGet(); + }); + try { client.newProducer().topic(topic).create(); fail("Should have failed"); @@ -189,8 +195,103 @@ private void 
producerCreateFailAfterRetryTimeout(String topic) throws Exception // we fail even on the retriable error assertTrue(e instanceof PulsarClientException); } + // There is a small race condition here because the producer's timeout both fails the client creation + // and triggers sending CloseProducer. + Awaitility.await().until(() -> closeProducerCounter.get() == 1); + mockBrokerService.resetHandleProducer(); + mockBrokerService.resetHandleCloseProducer(); + } + + @Test + public void testCreatedProducerSendsCloseProducerAfterTimeout() throws Exception { + producerCreatedThenFailsRetryTimeout("persistent://prop/use/ns/t1"); + } + + @Test + public void testCreatedPartitionedProducerSendsCloseProducerAfterTimeout() throws Exception { + producerCreatedThenFailsRetryTimeout("persistent://prop/use/ns/part-t1"); + } + + private void producerCreatedThenFailsRetryTimeout(String topic) throws Exception { + @Cleanup + PulsarClient client = PulsarClient.builder().serviceUrl(mockBrokerService.getBrokerAddress()) + .operationTimeout(1, TimeUnit.SECONDS).build(); + final AtomicInteger producerCounter = new AtomicInteger(0); + final AtomicInteger closeProducerCounter = new AtomicInteger(0); + mockBrokerService.setHandleProducer((ctx, producer) -> { + int producerCount = producerCounter.incrementAndGet(); + if (producerCount == 1) { + ctx.writeAndFlush(Commands.newProducerSuccess(producer.getRequestId(), "producer1", + SchemaVersion.Empty)); + // Trigger reconnect + ctx.writeAndFlush(Commands.newCloseProducer(producer.getProducerId(), -1)); + } else if (producerCount != 2) { + // Respond to subsequent requests to prevent timeouts + ctx.writeAndFlush(Commands.newProducerSuccess(producer.getRequestId(), "producer1", + SchemaVersion.Empty)); + } + // Don't respond to the second Producer command to ensure timeout + }); + + mockBrokerService.setHandleCloseProducer((ctx, closeProducer) -> { + closeProducerCounter.incrementAndGet(); + 
ctx.writeAndFlush(Commands.newSuccess(closeProducer.getRequestId())); + }); + + // Create producer should succeed then upon closure, it should reattempt creation. The first request will + // time out, which triggers CloseProducer. The client might send the third Producer command before the + // below assertion, so we pass with 2 or 3. + client.newProducer().topic(topic).create(); + Awaitility.await().until(() -> closeProducerCounter.get() == 1); + Awaitility.await().until(() -> producerCounter.get() == 2 || producerCounter.get() == 3); mockBrokerService.resetHandleProducer(); + mockBrokerService.resetHandleCloseProducer(); + } + + @Test + public void testCreatedConsumerSendsCloseConsumerAfterTimeout() throws Exception { + consumerCreatedThenFailsRetryTimeout("persistent://prop/use/ns/t1"); + } + + @Test + public void testCreatedPartitionedConsumerSendsCloseConsumerAfterTimeout() throws Exception { + consumerCreatedThenFailsRetryTimeout("persistent://prop/use/ns/part-t1"); + } + + private void consumerCreatedThenFailsRetryTimeout(String topic) throws Exception { + @Cleanup + PulsarClient client = PulsarClient.builder().serviceUrl(mockBrokerService.getBrokerAddress()) + .operationTimeout(1, TimeUnit.SECONDS).build(); + final AtomicInteger subscribeCounter = new AtomicInteger(0); + final AtomicInteger closeConsumerCounter = new AtomicInteger(0); + + mockBrokerService.setHandleSubscribe((ctx, subscribe) -> { + int subscribeCount = subscribeCounter.incrementAndGet(); + if (subscribeCount == 1) { + ctx.writeAndFlush(Commands.newSuccess(subscribe.getRequestId())); + // Trigger reconnect + ctx.writeAndFlush(Commands.newCloseConsumer(subscribe.getConsumerId(), -1)); + } else if (subscribeCount != 2) { + // Respond to subsequent requests to prevent timeouts + ctx.writeAndFlush(Commands.newSuccess(subscribe.getRequestId())); + } + // Don't respond to the second Subscribe command to ensure timeout + }); + + mockBrokerService.setHandleCloseConsumer((ctx, closeConsumer) -> { + 
closeConsumerCounter.incrementAndGet(); + ctx.writeAndFlush(Commands.newSuccess(closeConsumer.getRequestId())); + }); + + // Create consumer (subscribe) should succeed then upon closure, it should reattempt creation. The first + // request will time out, which triggers CloseConsumer. The client might send the third Subscribe command before + // the below assertion, so we pass with 2 or 3. + client.newConsumer().topic(topic).subscriptionName("test").subscribe(); + Awaitility.await().until(() -> closeConsumerCounter.get() == 1); + Awaitility.await().until(() -> subscribeCounter.get() == 2 || subscribeCounter.get() == 3); + mockBrokerService.resetHandleSubscribe(); + mockBrokerService.resetHandleCloseConsumer(); } @Test @@ -491,7 +592,6 @@ public void testPartitionedProducerFailOnInitialization() throws Throwable { mockBrokerService.resetHandleProducer(); mockBrokerService.resetHandleCloseProducer(); - client.close(); } // failed to connect to partition at sending step if a producer which connects to broker as lazy-loading mode @@ -552,7 +652,6 @@ public void testPartitionedProducerFailOnSending() throws Throwable { mockBrokerService.resetHandleProducer(); mockBrokerService.resetHandleCloseProducer(); - client.close(); } // if a producer which doesn't connect as lazy-loading mode fails to connect while creating partitioned producer, diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ConsumerBatchReceiveTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ConsumerBatchReceiveTest.java index 5522dd7e495a2..19cb25664b2fb 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ConsumerBatchReceiveTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ConsumerBatchReceiveTest.java @@ -19,6 +19,8 @@ package org.apache.pulsar.client.api; import lombok.Cleanup; +import org.apache.pulsar.client.impl.ConsumerBase; +import org.awaitility.Awaitility; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import 
org.testng.Assert; @@ -48,6 +50,14 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + @DataProvider(name = "partitioned") + public Object[][] partitionedTopicProvider() { + return new Object[][] { + { true }, + { false } + }; + } + @DataProvider(name = "batchReceivePolicy") public Object[][] batchReceivePolicyProvider() { return new Object[][] { @@ -403,6 +413,65 @@ public void verifyBatchSizeIsEqualToPolicyConfiguration() throws Exception { receiveAllBatchesAndVerifyBatchSizeIsEqualToMaxNumMessages(consumer, batchReceivePolicy, messagesToSend / muxNumMessages); } + @Test + public void verifyNumBytesSmallerThanMessageSize() throws Exception { + final int messagesToSend = 500; + + final String topic = "persistent://my-property/my-ns/batch-receive-" + UUID.randomUUID(); + BatchReceivePolicy batchReceivePolicy = BatchReceivePolicy.builder().maxNumBytes(10).build(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topic).create(); + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("s2") + .batchReceivePolicy(batchReceivePolicy) + .subscribe(); + + sendMessagesAsyncAndWait(producer, messagesToSend); + CountDownLatch latch = new CountDownLatch(messagesToSend+1); + receiveAsync(consumer, messagesToSend, latch); + latch.await(); + } + + @Test(dataProvider = "partitioned") + public void testBatchReceiveTimeoutTask(boolean partitioned) throws Exception { + final String topic = "persistent://my-property/my-ns/batch-receive-" + UUID.randomUUID(); + + if (partitioned) { + admin.topics().createPartitionedTopic(topic, 3); + } + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topic).create(); + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("sub") + .receiverQueueSize(1) + .batchReceivePolicy(BatchReceivePolicy.builder() + .maxNumBytes(1024 * 1024) + .maxNumMessages(1) + 
.timeout(5, TimeUnit.SECONDS) + .build()) + .subscribe(); + Assert.assertFalse(((ConsumerBase)consumer).hasBatchReceiveTimeout()); + final int messagesToSend = 500; + sendMessagesAsyncAndWait(producer, messagesToSend); + for (int i = 0; i < 100; i++) { + Assert.assertNotNull(consumer.receive()); + } + Assert.assertFalse(((ConsumerBase)consumer).hasBatchReceiveTimeout()); + for (int i = 0; i < 400; i++) { + Messages batchReceived = consumer.batchReceive(); + Assert.assertEquals(batchReceived.size(), 1); + } + Awaitility.await().untilAsserted(() -> Assert.assertFalse(((ConsumerBase)consumer).hasBatchReceiveTimeout())); + Assert.assertEquals(consumer.batchReceive().size(), 0); + Awaitility.await().untilAsserted(() -> Assert.assertFalse(((ConsumerBase)consumer).hasBatchReceiveTimeout())); + } + private void receiveAllBatchesAndVerifyBatchSizeIsEqualToMaxNumMessages(Consumer consumer, BatchReceivePolicy batchReceivePolicy, diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DeadLetterTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DeadLetterTopicTest.java index 645c7674b00c5..9dfe3cf9aca65 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DeadLetterTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DeadLetterTopicTest.java @@ -18,19 +18,26 @@ */ package org.apache.pulsar.client.api; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - +import lombok.Cleanup; import lombok.Data; import 
org.apache.avro.reflect.Nullable; import org.apache.pulsar.client.api.schema.GenericRecord; -import lombok.Cleanup; +import org.apache.pulsar.client.impl.ConsumerBuilderImpl; import org.apache.pulsar.client.util.RetryMessageUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,12 +45,6 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertNotNull; -import static org.testng.Assert.assertNull; -import static org.testng.Assert.assertTrue; -import static org.testng.Assert.fail; - @Test(groups = "flaky") public class DeadLetterTopicTest extends ProducerConsumerBase { @@ -62,6 +63,66 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + @Test + public void testDeadLetterTopicWithMessageKey() throws Exception { + final String topic = "persistent://my-property/my-ns/dead-letter-topic"; + + final int maxRedeliveryCount = 1; + + final int sendMessages = 100; + + Consumer consumer = pulsarClient.newConsumer(Schema.BYTES) + .topic(topic) + .subscriptionName("my-subscription") + .subscriptionType(SubscriptionType.Shared) + .ackTimeout(1, TimeUnit.SECONDS) + .deadLetterPolicy(DeadLetterPolicy.builder().maxRedeliverCount(maxRedeliveryCount).build()) + .receiverQueueSize(100) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + @Cleanup + PulsarClient newPulsarClient = newPulsarClient(lookupUrl.toString(), 0);// Creates new client connection + Consumer deadLetterConsumer = newPulsarClient.newConsumer(Schema.BYTES) + .topic("persistent://my-property/my-ns/dead-letter-topic-my-subscription-DLQ") + .subscriptionName("my-subscription") + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + Producer producer = pulsarClient.newProducer(Schema.BYTES) + .topic(topic) + .create(); + + for (int i = 0; i < sendMessages; i++) { + producer.newMessage() + .key("test-key") + 
.value(String.format("Hello Pulsar [%d]", i).getBytes()) + .send(); + } + + producer.close(); + + int totalReceived = 0; + do { + Message message = consumer.receive(); + log.info("consumer received message : {} {}", message.getMessageId(), new String(message.getData())); + totalReceived++; + } while (totalReceived < sendMessages * (maxRedeliveryCount + 1)); + + int totalInDeadLetter = 0; + do { + Message message = deadLetterConsumer.receive(); + assertEquals(message.getKey(), "test-key"); + log.info("dead letter consumer received message : {} {}", message.getMessageId(), new String(message.getData())); + deadLetterConsumer.acknowledge(message); + totalInDeadLetter++; + } while (totalInDeadLetter < sendMessages); + + deadLetterConsumer.close(); + consumer.close(); + } + + @Test(groups = "quarantine") public void testDeadLetterTopic() throws Exception { final String topic = "persistent://my-property/my-ns/dead-letter-topic"; @@ -615,4 +676,16 @@ public void testDeadLetterTopicUnderPartitionedTopicWithKeyShareType() throws Ex checkConsumer.close(); } + + @Test + public void testDeadLetterPolicyDeserialize() { + ConsumerBuilder consumerBuilder = pulsarClient.newConsumer(Schema.STRING); + DeadLetterPolicy policy = + DeadLetterPolicy.builder().deadLetterTopic("a").retryLetterTopic("a") + .maxRedeliverCount(1).build(); + consumerBuilder.deadLetterPolicy(policy); + Map config = new HashMap<>(); + consumerBuilder.loadConf(config); + assertEquals(((ConsumerBuilderImpl)consumerBuilder).getConf().getDeadLetterPolicy(), policy); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DispatcherBlockConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DispatcherBlockConsumerTest.java index d43a759bf3622..867ea1592ebde 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DispatcherBlockConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/DispatcherBlockConsumerTest.java @@ -18,6 +18,7 @@ */ 
package org.apache.pulsar.client.api; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; import static org.testng.Assert.assertEquals; @@ -25,7 +26,11 @@ import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.Maps; +import com.google.common.collect.Multimap; +import com.google.common.collect.Queues; +import com.google.common.collect.Sets; import java.lang.reflect.Field; import java.util.Iterator; import java.util.List; @@ -41,8 +46,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; import java.util.stream.Collectors; - - import lombok.Cleanup; import org.apache.pulsar.broker.namespace.NamespaceService; import org.apache.pulsar.broker.service.BrokerService; @@ -50,8 +53,8 @@ import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.impl.ConsumerImpl; import org.apache.pulsar.client.impl.MessageIdImpl; -import org.apache.pulsar.common.policies.data.TopicStats; import org.apache.pulsar.common.policies.data.SubscriptionStats; +import org.apache.pulsar.common.policies.data.TopicStats; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashSet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,12 +64,6 @@ import org.testng.annotations.Test; import org.testng.collections.Lists; -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.Maps; -import com.google.common.collect.Multimap; -import com.google.common.collect.Queues; -import com.google.common.collect.Sets; - @Test(groups = "flaky") public class DispatcherBlockConsumerTest extends ProducerConsumerBase { private static final Logger log = LoggerFactory.getLogger(DispatcherBlockConsumerTest.class); @@ 
-634,7 +631,8 @@ public void testBrokerSubscriptionRecovery(boolean unloadBundleGracefully) throw // if broker unload bundle gracefully then cursor metadata recovered from zk else from ledger if (unloadBundleGracefully) { // set clean namespace which will not let broker unload bundle gracefully: stop broker - Supplier namespaceServiceSupplier = () -> spy(new NamespaceService(pulsar)); + Supplier namespaceServiceSupplier = + () -> spyWithClassAndConstructorArgs(NamespaceService.class, pulsar); doReturn(namespaceServiceSupplier).when(pulsar).getNamespaceServiceProvider(); } stopBroker(); @@ -676,7 +674,7 @@ public void testBrokerSubscriptionRecovery(boolean unloadBundleGracefully) throw * * */ - @Test(timeOut = 10000) + @Test(timeOut = 60000) public void testBlockBrokerDispatching() { log.info("-- Starting {} test --", methodName); @@ -688,6 +686,7 @@ public void testBlockBrokerDispatching() { ScheduledExecutorService executor = Executors.newScheduledThreadPool(1); try { + final int waitMills = 500; final int maxUnAckPerBroker = 200; final double unAckMsgPercentagePerDispatcher = 10; int maxUnAckPerDispatcher = (int) ((maxUnAckPerBroker * unAckMsgPercentagePerDispatcher) / 100); // 200 * @@ -745,7 +744,7 @@ public void testBlockBrokerDispatching() { Message msg = null; Set messages1 = Sets.newHashSet(); for (int j = 0; j < totalProducedMsgs; j++) { - msg = consumer1Sub1.receive(100, TimeUnit.MILLISECONDS); + msg = consumer1Sub1.receive(waitMills, TimeUnit.MILLISECONDS); if (msg != null) { messages1.add(msg.getMessageId()); } else { @@ -754,7 +753,7 @@ public void testBlockBrokerDispatching() { // once consumer receives maxUnAckPerBroker-msgs then sleep to give a chance to scheduler to block the // subscription if (j == maxUnAckPerBroker) { - Thread.sleep(200); + Thread.sleep(waitMills); } } // client must receive number of messages = maxUnAckPerbroker rather all produced messages @@ -767,7 +766,7 @@ public void testBlockBrokerDispatching() { 
.subscriptionType(SubscriptionType.Shared).acknowledgmentGroupTime(0, TimeUnit.SECONDS).subscribe(); int consumer2Msgs = 0; for (int j = 0; j < totalProducedMsgs; j++) { - msg = consumer2Sub1.receive(100, TimeUnit.MILLISECONDS); + msg = consumer2Sub1.receive(waitMills, TimeUnit.MILLISECONDS); if (msg != null) { consumer2Msgs++; } else { @@ -792,7 +791,7 @@ public void testBlockBrokerDispatching() { .subscriptionType(SubscriptionType.Shared).acknowledgmentGroupTime(0, TimeUnit.SECONDS).subscribe(); Set messages2 = Sets.newHashSet(); for (int j = 0; j < totalProducedMsgs; j++) { - msg = consumerSub2.receive(100, TimeUnit.MILLISECONDS); + msg = consumerSub2.receive(waitMills, TimeUnit.MILLISECONDS); if (msg != null) { messages2.add(msg.getMessageId()); } else { @@ -809,7 +808,7 @@ public void testBlockBrokerDispatching() { .subscriptionType(SubscriptionType.Shared).acknowledgmentGroupTime(0, TimeUnit.SECONDS).subscribe(); int consumedMsgsSub3 = 0; for (int j = 0; j < totalProducedMsgs; j++) { - msg = consumer1Sub3.receive(100, TimeUnit.MILLISECONDS); + msg = consumer1Sub3.receive(); if (msg != null) { consumedMsgsSub3++; consumer1Sub3.acknowledge(msg); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/KeySharedSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/KeySharedSubscriptionTest.java index d1f007537ebe2..057f0c6392a93 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/KeySharedSubscriptionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/KeySharedSubscriptionTest.java @@ -40,7 +40,11 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicInteger; @@ -1069,6 +1073,75 @@ public void testSelectorChangedAfterAllConsumerDisconnected(String topicDomain) consumer1.close(); } + @Test(timeOut = 30_000) + public void testCheckConsumersWithSameName() throws Exception { + final String topicName = "persistent://public/default/same-name-" + UUID.randomUUID(); + final String subName = "my-sub"; + final String consumerName = "name"; + + ConsumerBuilder cb = pulsarClient.newConsumer(Schema.STRING) + .topic(topicName) + .subscriptionName(subName) + .consumerName(consumerName) + .subscriptionType(SubscriptionType.Key_Shared); + + // Create 3 consumers with same name + Consumer c1 = cb.subscribe(); + + @Cleanup + Consumer c2 = cb.subscribe(); + @Cleanup + Consumer c3 = cb.subscribe(); + + Producer p = pulsarClient.newProducer(Schema.STRING) + .topic(topicName) + .create(); + for (int i = 0; i < 100; i++) { + p.newMessage() + .key(Integer.toString(i)) + .value("msg-" + i) + .send(); + } + + // C1 receives some messages and won't ack + for (int i = 0; i < 5; i++) { + c1.receive(); + } + + // Close C1, now all messages should go to c2 & c3 + c1.close(); + + CountDownLatch l = new CountDownLatch(100); + + @Cleanup("shutdownNow") + ExecutorService e = Executors.newCachedThreadPool(); + e.submit(() -> { + while (l.getCount() > 0) { + try { + Message msg = c2.receive(1, TimeUnit.SECONDS); + c2.acknowledge(msg); + l.countDown(); + } catch (PulsarClientException ex) { + ex.printStackTrace(); + } + } + }); + + e.submit(() -> { + while (l.getCount() > 0) { + try { + Message msg = c3.receive(1, TimeUnit.SECONDS); + c3.acknowledge(msg); + l.countDown(); + } catch (PulsarClientException ex) { + ex.printStackTrace(); + } + } + }); + + l.await(); + } + private KeySharedMode getKeySharedModeOfSubscription(Topic topic, String subscription) { if (TopicName.get(topic.getName()).getDomain().equals(TopicDomain.persistent)) { return ((PersistentStickyKeyDispatcherMultipleConsumers) 
topic.getSubscription(subscription) @@ -1300,4 +1373,131 @@ public EncryptionKeyInfo getPrivateKey(String keyName, Map keyMe return null; } } + + @Test + public void testStickyKeyRangesRestartConsumers() throws PulsarClientException, InterruptedException { + final String topic = TopicName.get("persistent", "public", "default", + "testStickyKeyRangesRestartConsumers" + UUID.randomUUID()).toString(); + + final String subscriptionName = "my-sub"; + + final int numMessages = 100; + // start 2 consumers + Set sentMessages = new ConcurrentSkipListSet<>(); + + CountDownLatch count1 = new CountDownLatch(2); + CountDownLatch count2 = new CountDownLatch(13); // consumer 2 usually receive the fix messages + CountDownLatch count3 = new CountDownLatch(numMessages); + Consumer consumer1 = pulsarClient.newConsumer( + Schema.STRING) + .topic(topic) + .subscriptionName(subscriptionName) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionType(SubscriptionType.Key_Shared) + .keySharedPolicy(KeySharedPolicy.stickyHashRange().ranges(Range.of(0, 65536 / 2))) + .messageListener((consumer, msg) -> { + consumer.acknowledgeAsync(msg).whenComplete((m, e) -> { + if (e != null) { + log.error("error", e); + } else { + sentMessages.remove(msg.getKey()); + count1.countDown(); + count3.countDown(); + } + }); + }) + .subscribe(); + + Consumer consumer2 = pulsarClient.newConsumer( + Schema.STRING) + .topic(topic) + .subscriptionName(subscriptionName) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionType(SubscriptionType.Key_Shared) + .keySharedPolicy(KeySharedPolicy.stickyHashRange().ranges(Range.of(65536 / 2 + 1, 65535))) + .messageListener((consumer, msg) -> { + consumer.acknowledgeAsync(msg).whenComplete((m, e) -> { + if (e != null) { + log.error("error", e); + } else { + sentMessages.remove(msg.getKey()); + count2.countDown(); + count3.countDown(); + } + }); + }) + .subscribe(); + + pulsar.getExecutor().submit(() -> { + try + { 
+ try (Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .enableBatching(false) + .create();) { + for (int i = 0; i < numMessages; i++) + { + String key = "test" + i; + sentMessages.add(key); + producer.newMessage() + .key(key) + .value("test" + i). + send(); + Thread.sleep(100); + } + } + } catch (Throwable t) { + log.error("error", t); + }}); + + // wait for some messages to be received by both of the consumers + count1.await(); + count2.await(); + consumer1.close(); + consumer2.close(); + + // this sleep is to trigger a race condition that happens + // when there are some messages that cannot be dispatched while consuming + Thread.sleep(3000); + + // start consuming again... + + pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName(subscriptionName) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionType(SubscriptionType.Key_Shared) + .keySharedPolicy(KeySharedPolicy.stickyHashRange().ranges(Range.of(0, 65536 / 2))) + .messageListener((consumer, msg) -> { + consumer.acknowledgeAsync(msg).whenComplete((m, e) -> { + if (e != null) { + log.error("error", e); + } else { + sentMessages.remove(msg.getKey()); + count3.countDown(); + } + }); + }) + .subscribe(); + pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName(subscriptionName) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionType(SubscriptionType.Key_Shared) + .keySharedPolicy(KeySharedPolicy.stickyHashRange().ranges(Range.of(65536 / 2 + 1, 65535))) + .messageListener((consumer, msg) -> { + consumer.acknowledgeAsync(msg).whenComplete((m, e) -> { + if (e != null) { + log.error("error", e); + } else { + sentMessages.remove(msg.getKey()); + count3.countDown(); + } + }); + }) + .subscribe(); + // wait for all the messages to be delivered + count3.await(); + assertTrue(sentMessages.isEmpty(), "didn't receive " + sentMessages); + } } diff --git 
a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MemoryLimitTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MemoryLimitTest.java index ec98e7d1dbec9..431991e61aa19 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MemoryLimitTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MemoryLimitTest.java @@ -18,20 +18,22 @@ */ package org.apache.pulsar.client.api; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.fail; - -import java.util.concurrent.CountDownLatch; - import lombok.Cleanup; - import org.apache.pulsar.client.api.PulsarClientException.MemoryBufferIsFullError; -import org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.client.impl.ProducerImpl; +import org.apache.pulsar.client.impl.PulsarTestClient; +import org.awaitility.Awaitility; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; +import java.time.Duration; +import java.util.concurrent.TimeUnit; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.fail; + @Test(groups = "broker-api") public class MemoryLimitTest extends ProducerConsumerBase { @@ -62,27 +64,30 @@ public void testRejectMessages() throws Exception { String topic = newTopicName(); - @Cleanup - PulsarClientImpl client = (PulsarClientImpl) PulsarClient.builder() + ClientBuilder clientBuilder = PulsarClient.builder() .serviceUrl(pulsar.getBrokerServiceUrl()) - .memoryLimit(100, SizeUnit.KILO_BYTES) - .build(); + .memoryLimit(100, SizeUnit.KILO_BYTES); @Cleanup - Producer producer = client.newProducer() + PulsarTestClient client = PulsarTestClient.create(clientBuilder); + + @Cleanup + ProducerImpl producer = (ProducerImpl) client.newProducer() .topic(topic) .blockIfQueueFull(false) + .sendTimeout(5, TimeUnit.SECONDS) .create(); + // make sure all message pending at pendingMessages 
queue + // connection with broker can not be established, so handleSendReceipt will not be invoked while sending message + client.dropOpSendMessages(); final int n = 101; - CountDownLatch latch = new CountDownLatch(n); - for (int i = 0; i < n; i++) { - producer.sendAsync(new byte[1024]).thenRun(() -> { - latch.countDown(); - }); + producer.sendAsync(new byte[1024]); } - + Awaitility.await() + .atMost(Duration.ofSeconds(5)) + .until(() -> producer.getPendingQueueSize() == n); assertEquals(client.getMemoryLimitController().currentUsage(), n * 1024); try { @@ -92,8 +97,10 @@ public void testRejectMessages() // Expected } - latch.await(); - + client.allowReconnecting(); + Awaitility.await() + .atMost(Duration.ofSeconds(30)) + .until(() -> producer.getPendingQueueSize() == 0); assertEquals(client.getMemoryLimitController().currentUsage(), 0); // We should now be able to send again @@ -105,41 +112,40 @@ public void testRejectMessagesOnMultipleTopics() throws Exception { String t1 = newTopicName(); String t2 = newTopicName(); - @Cleanup - PulsarClientImpl client = (PulsarClientImpl) PulsarClient.builder() + ClientBuilder clientBuilder = PulsarClient.builder() .serviceUrl(pulsar.getBrokerServiceUrl()) - .memoryLimit(100, SizeUnit.KILO_BYTES) - .build(); + .memoryLimit(100, SizeUnit.KILO_BYTES); + + @Cleanup + PulsarTestClient client = PulsarTestClient.create(clientBuilder); @Cleanup - Producer p1 = client.newProducer() + ProducerImpl p1 = (ProducerImpl) client.newProducer() .topic(t1) .blockIfQueueFull(false) + .sendTimeout(5, TimeUnit.SECONDS) .create(); @Cleanup - Producer p2 = client.newProducer() + ProducerImpl p2 = (ProducerImpl) client.newProducer() .topic(t2) .blockIfQueueFull(false) + .sendTimeout(5, TimeUnit.SECONDS) .create(); + client.dropOpSendMessages(); final int n = 101; - CountDownLatch latch = new CountDownLatch(n); - for (int i = 0; i < n / 2; i++) { - p1.sendAsync(new byte[1024]).thenRun(() -> { - latch.countDown(); - }); - p2.sendAsync(new 
byte[1024]).thenRun(() -> { - latch.countDown(); - }); + p1.sendAsync(new byte[1024]); + p2.sendAsync(new byte[1024]); } // Last message in order to reach the limit - p1.sendAsync(new byte[1024]).thenRun(() -> { - latch.countDown(); - }); + p1.sendAsync(new byte[1024]); + Awaitility.await() + .atMost(Duration.ofSeconds(5)) + .until(() -> (p1.getPendingQueueSize() + p2.getPendingQueueSize()) == n); assertEquals(client.getMemoryLimitController().currentUsage(), n * 1024); try { @@ -156,8 +162,10 @@ public void testRejectMessagesOnMultipleTopics() throws Exception { // Expected } - latch.await(); - + client.allowReconnecting(); + Awaitility.await() + .atMost(Duration.ofSeconds(30)) + .until(() -> (p1.getPendingQueueSize() + p2.getPendingQueueSize()) == 0); assertEquals(client.getMemoryLimitController().currentUsage(), 0); // We should now be able to send again diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiRolesTokenAuthorizationProviderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiRolesTokenAuthorizationProviderTest.java new file mode 100644 index 0000000000000..12d7c71358bbd --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiRolesTokenAuthorizationProviderTest.java @@ -0,0 +1,231 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.client.api; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertThrows; +import com.google.common.collect.Sets; +import io.jsonwebtoken.Jwts; +import io.jsonwebtoken.SignatureAlgorithm; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import javax.crypto.SecretKey; +import lombok.Cleanup; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; +import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; +import org.apache.pulsar.broker.authorization.MultiRolesTokenAuthorizationProvider; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminBuilder; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfo; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +public class MultiRolesTokenAuthorizationProviderTest extends MockedPulsarServiceBaseTest { + + private final SecretKey secretKey = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + private final String superUserToken; + private final String normalUserToken; + + public MultiRolesTokenAuthorizationProviderTest() { + Map claims = new HashMap<>(); + Set roles = new HashSet<>(); + roles.add("user1"); + roles.add("superUser"); + claims.put("roles", roles); + superUserToken = Jwts.builder() + .setClaims(claims) + .signWith(secretKey) + .compact(); 
+ + roles = new HashSet<>(); + roles.add("normalUser"); + roles.add("user2"); + roles.add("user5"); + claims.put("roles", roles); + normalUserToken = Jwts.builder() + .setClaims(claims) + .signWith(secretKey) + .compact(); + } + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + + conf.setAuthenticationEnabled(true); + conf.setAuthorizationEnabled(true); + + Set superUserRoles = new HashSet<>(); + superUserRoles.add("superUser"); + conf.setSuperUserRoles(superUserRoles); + + Properties properties = new Properties(); + properties.setProperty("tokenSecretKey", + "data:;base64," + Base64.getEncoder().encodeToString(secretKey.getEncoded())); + properties.setProperty("tokenAuthClaim", "roles"); + conf.setProperties(properties); + + conf.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName()); + conf.setBrokerClientAuthenticationParameters(superUserToken); + + Set providers = new HashSet<>(); + providers.add(AuthenticationProviderToken.class.getName()); + conf.setAuthenticationProviders(providers); + conf.setAuthorizationProvider(MultiRolesTokenAuthorizationProvider.class.getName()); + + conf.setClusterName(configClusterName); + conf.setNumExecutorThreadPoolSize(5); + } + + @BeforeClass + @Override + protected void setup() throws Exception { + super.internalSetup(); + + admin.clusters().createCluster(configClusterName, + ClusterData.builder() + .brokerServiceUrl(brokerUrl.toString()) + .serviceUrl(getPulsar().getWebServiceAddress()) + .build() + ); + } + + @BeforeClass + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @Override + protected void customizeNewPulsarClientBuilder(ClientBuilder clientBuilder) { + clientBuilder.authentication(new AuthenticationToken(superUserToken)); + } + + @Override + protected void customizeNewPulsarAdminBuilder(PulsarAdminBuilder pulsarAdminBuilder) { + pulsarAdminBuilder.authentication(new AuthenticationToken(superUserToken)); + } + + private 
PulsarAdmin newPulsarAdmin(String token) throws PulsarClientException { + return PulsarAdmin.builder() + .serviceHttpUrl(pulsar.getWebServiceAddress()) + .authentication(new AuthenticationToken(token)) + .requestTimeout(3, TimeUnit.SECONDS) + .build(); + } + + private PulsarClient newPulsarClient(String token) throws PulsarClientException { + return PulsarClient.builder() + .serviceUrl(pulsar.getBrokerServiceUrl()) + .authentication(new AuthenticationToken(token)) + .operationTimeout(3, TimeUnit.SECONDS) + .build(); + } + + @Test + public void testAdminRequestWithSuperUserToken() throws Exception { + String tenant = "superuser-admin-tenant"; + @Cleanup + PulsarAdmin admin = newPulsarAdmin(superUserToken); + admin.tenants().createTenant(tenant, TenantInfo.builder() + .allowedClusters(Sets.newHashSet(configClusterName)).build()); + String namespace = "superuser-admin-namespace"; + admin.namespaces().createNamespace(tenant + "/" + namespace); + admin.brokers().getAllDynamicConfigurations(); + admin.tenants().getTenants(); + admin.topics().getList(tenant + "/" + namespace); + } + + @Test + public void testProduceAndConsumeWithSuperUserToken() throws Exception { + String tenant = "superuser-client-tenant"; + @Cleanup + PulsarAdmin admin = newPulsarAdmin(superUserToken); + admin.tenants().createTenant(tenant, TenantInfo.builder() + .allowedClusters(Sets.newHashSet(configClusterName)).build()); + String namespace = "superuser-client-namespace"; + admin.namespaces().createNamespace(tenant + "/" + namespace); + String topic = tenant + "/" + namespace + "/" + "test-topic"; + + @Cleanup + PulsarClient client = newPulsarClient(superUserToken); + @Cleanup + Producer producer = client.newProducer().topic(topic).create(); + byte[] body = "hello".getBytes(StandardCharsets.UTF_8); + producer.send(body); + + @Cleanup + Consumer consumer = client.newConsumer().topic(topic) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionName("test") + .subscribe(); 
+ Message message = consumer.receive(3, TimeUnit.SECONDS); + assertNotNull(message); + assertEquals(message.getData(), body); + } + + @Test + public void testAdminRequestWithNormalUserToken() throws Exception { + String tenant = "normaluser-admin-tenant"; + @Cleanup + PulsarAdmin admin = newPulsarAdmin(normalUserToken); + + assertThrows(PulsarAdminException.NotAuthorizedException.class, + () -> admin.tenants().createTenant(tenant, TenantInfo.builder() + .allowedClusters(Sets.newHashSet(configClusterName)).build())); + } + + @Test + public void testProduceAndConsumeWithNormalUserToken() throws Exception { + String tenant = "normaluser-client-tenant"; + @Cleanup + PulsarAdmin admin = newPulsarAdmin(superUserToken); + admin.tenants().createTenant(tenant, TenantInfo.builder() + .allowedClusters(Sets.newHashSet(configClusterName)).build()); + String namespace = "normaluser-client-namespace"; + admin.namespaces().createNamespace(tenant + "/" + namespace); + String topic = tenant + "/" + namespace + "/" + "test-topic"; + + @Cleanup + PulsarClient client = newPulsarClient(normalUserToken); + assertThrows(PulsarClientException.AuthorizationException.class, () -> { + @Cleanup + Producer ignored = client.newProducer().topic(topic).create(); + }); + + assertThrows(PulsarClientException.AuthorizationException.class, () -> { + @Cleanup + Consumer ignored = client.newConsumer().topic(topic) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionName("test") + .subscribe(); + }); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiTopicsConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiTopicsConsumerTest.java index 715f3adb7aaf6..29ecb39853a2e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiTopicsConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/MultiTopicsConsumerTest.java @@ -16,6 +16,7 @@ * specific language governing permissions 
and limitations * under the License. */ + package org.apache.pulsar.client.api; import static org.mockito.ArgumentMatchers.any; @@ -24,9 +25,16 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import com.google.common.collect.Lists; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import lombok.Cleanup; +import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.impl.ClientBuilderImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; @@ -34,6 +42,7 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @@ -63,13 +72,14 @@ protected PulsarClient createNewPulsarClient(ClientBuilder clientBuilder) throws return new PulsarClientImpl(conf) { { ScheduledExecutorService internalExecutorService = - (ScheduledExecutorService) super.getInternalExecutorService(); + (ScheduledExecutorService) super.getScheduledExecutorProvider().getExecutor(); internalExecutorServiceDelegate = mock(ScheduledExecutorService.class, // a spy isn't used since that doesn't work for private classes, instead // the mock delegatesTo an existing instance. A delegate is sufficient for verifying // method calls on the interface. 
Mockito.withSettings().defaultAnswer(AdditionalAnswers.delegatesTo(internalExecutorService))); } + @Override public ExecutorService getInternalExecutorService() { return internalExecutorServiceDelegate; @@ -119,4 +129,69 @@ public void testMultiTopicsConsumerCloses() throws Exception { verify(internalExecutorServiceDelegate, times(0)) .schedule(any(Runnable.class), anyLong(), any()); } + + // test that reproduces the issue that PR https://github.com/apache/pulsar/pull/12456 fixes + // where MultiTopicsConsumerImpl has a data race that causes out-of-order delivery of messages + @Test + public void testShouldMaintainOrderForIndividualTopicInMultiTopicsConsumer() + throws PulsarAdminException, PulsarClientException, ExecutionException, InterruptedException, + TimeoutException { + String topicName = newTopicName(); + int numPartitions = 2; + int numMessages = 100000; + admin.topics().createPartitionedTopic(topicName, numPartitions); + + Producer[] producers = new Producer[numPartitions]; + + for (int i = 0; i < numPartitions; i++) { + producers[i] = pulsarClient.newProducer(Schema.INT64) + // produce to each partition directly so that order can be maintained in sending + .topic(topicName + "-partition-" + i) + .enableBatching(true) + .maxPendingMessages(30000) + .maxPendingMessagesAcrossPartitions(60000) + .batchingMaxMessages(10000) + .batchingMaxPublishDelay(5, TimeUnit.SECONDS) + .batchingMaxBytes(4 * 1024 * 1024) + .blockIfQueueFull(true) + .create(); + } + + @Cleanup + Consumer consumer = pulsarClient + .newConsumer(Schema.INT64) + // consume on the partitioned topic + .topic(topicName) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .receiverQueueSize(numMessages) + .subscriptionName(methodName) + .subscribe(); + + // produce sequence numbers to each partition topic + long sequenceNumber = 1L; + for (int i = 0; i < numMessages; i++) { + for (Producer producer : producers) { + producer.newMessage() + .value(sequenceNumber) + .sendAsync(); + 
} + sequenceNumber++; + } + for (Producer producer : producers) { + producer.close(); + } + + // receive and validate sequences in the partitioned topic + Map receivedSequences = new HashMap<>(); + int receivedCount = 0; + while (receivedCount < numPartitions * numMessages) { + Message message = consumer.receiveAsync().get(5, TimeUnit.SECONDS); + consumer.acknowledge(message); + receivedCount++; + AtomicLong receivedSequenceCounter = + receivedSequences.computeIfAbsent(message.getTopicName(), k -> new AtomicLong(1L)); + Assert.assertEquals(message.getValue().longValue(), receivedSequenceCounter.getAndIncrement()); + } + Assert.assertEquals(numPartitions * numMessages, receivedCount); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ProducerConsumerBase.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ProducerConsumerBase.java index eae107cb8f880..1b8a16822db04 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ProducerConsumerBase.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/ProducerConsumerBase.java @@ -26,7 +26,7 @@ import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.ClusterDataImpl; +import org.apache.pulsar.common.policies.data.TenantInfo; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.testng.Assert; import org.testng.annotations.BeforeMethod; @@ -63,6 +63,25 @@ protected void testMessageOrderAndDuplicates(Set messagesReceived, T rece Assert.assertTrue(messagesReceived.add(receivedMessage), "Received duplicate message " + receivedMessage); } + protected void setupDefaultTenantAndNamespace() throws Exception { + final String tenant = "public"; + final String namespace = tenant + "/default"; + + if (!admin.clusters().getClusters().contains(configClusterName)) { + admin.clusters().createCluster(configClusterName, + 
ClusterData.builder().serviceUrl(pulsar.getWebServiceAddress()).build()); + } + + if (!admin.tenants().getTenants().contains(tenant)) { + admin.tenants().createTenant(tenant, TenantInfo.builder().allowedClusters( + Sets.newHashSet(configClusterName)).build()); + } + + if (!admin.namespaces().getNamespaces(tenant).contains(namespace)) { + admin.namespaces().createNamespace(namespace); + } + } + private static final Random random = new Random(); protected String newTopicName() { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/RetryTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/RetryTopicTest.java index fc84a62dac1cd..00b48afb8bd31 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/RetryTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/RetryTopicTest.java @@ -18,18 +18,25 @@ */ package org.apache.pulsar.client.api; -import lombok.Cleanup; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.fail; +import com.google.common.collect.Sets; +import java.lang.reflect.Field; +import java.util.List; +import java.util.Set; import java.util.concurrent.TimeUnit; +import lombok.Cleanup; +import org.apache.pulsar.client.impl.ConsumerImpl; +import org.apache.pulsar.client.impl.MultiTopicsConsumerImpl; +import org.apache.pulsar.client.util.RetryMessageUtil; +import org.reflections.ReflectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -import java.util.concurrent.TimeUnit; - -import static org.testng.Assert.assertNull; - @Test(groups = "broker-api") public class RetryTopicTest extends ProducerConsumerBase { @@ -119,6 +126,100 @@ public void testRetryTopic() throws Exception { checkConsumer.close(); } + @Test + public void testRetryTopicProperties() throws Exception { + final 
String topic = "persistent://my-property/my-ns/retry-topic"; + + final int maxRedeliveryCount = 3; + + final int sendMessages = 10; + + Consumer consumer = pulsarClient.newConsumer(Schema.BYTES) + .topic(topic) + .subscriptionName("my-subscription") + .subscriptionType(SubscriptionType.Shared) + .enableRetry(true) + .deadLetterPolicy(DeadLetterPolicy.builder().maxRedeliverCount(maxRedeliveryCount).build()) + .receiverQueueSize(100) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + @Cleanup + PulsarClient newPulsarClient = newPulsarClient(lookupUrl.toString(), 0); + Consumer deadLetterConsumer = newPulsarClient.newConsumer(Schema.BYTES) + .topic("persistent://my-property/my-ns/retry-topic-my-subscription-DLQ") + .subscriptionName("my-subscription") + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + Producer producer = pulsarClient.newProducer(Schema.BYTES) + .topic(topic) + .create(); + + Set originMessageIds = Sets.newHashSet(); + for (int i = 0; i < sendMessages; i++) { + MessageId msgId = producer.send(String.format("Hello Pulsar [%d]", i).getBytes()); + originMessageIds.add(msgId.toString()); + } + + producer.close(); + + int totalReceived = 0; + Set retryMessageIds = Sets.newHashSet(); + do { + Message message = consumer.receive(); + log.info("consumer received message : {} {}", message.getMessageId(), new String(message.getData())); + // retry message + if (message.hasProperty(RetryMessageUtil.SYSTEM_PROPERTY_RECONSUMETIMES)) { + // check the REAL_TOPIC property + assertEquals(message.getProperty(RetryMessageUtil.SYSTEM_PROPERTY_REAL_TOPIC), topic); + retryMessageIds.add(message.getProperty(RetryMessageUtil.SYSTEM_PROPERTY_ORIGIN_MESSAGE_ID)); + } + consumer.reconsumeLater(message, 1, TimeUnit.SECONDS); + totalReceived++; + } while (totalReceived < sendMessages * (maxRedeliveryCount + 1)); + + // check the REAL_TOPIC property + assertEquals(retryMessageIds, originMessageIds); + + int 
totalInDeadLetter = 0; + Set deadLetterMessageIds = Sets.newHashSet(); + do { + Message message = deadLetterConsumer.receive(); + log.info("dead letter consumer received message : {} {}", message.getMessageId(), + new String(message.getData())); + // dead letter message + if (message.hasProperty(RetryMessageUtil.SYSTEM_PROPERTY_RECONSUMETIMES)) { + // check the REAL_TOPIC property + assertEquals(message.getProperty(RetryMessageUtil.SYSTEM_PROPERTY_REAL_TOPIC), topic); + deadLetterMessageIds.add(message.getProperty(RetryMessageUtil.SYSTEM_PROPERTY_ORIGIN_MESSAGE_ID)); + } + deadLetterConsumer.acknowledge(message); + totalInDeadLetter++; + } while (totalInDeadLetter < sendMessages); + + assertEquals(deadLetterMessageIds, originMessageIds); + + deadLetterConsumer.close(); + consumer.close(); + + Consumer checkConsumer = this.pulsarClient.newConsumer(Schema.BYTES) + .topic(topic) + .subscriptionName("my-subscription") + .subscriptionType(SubscriptionType.Shared) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + Message checkMessage = checkConsumer.receive(3, TimeUnit.SECONDS); + if (checkMessage != null) { + log.info("check consumer received message : {} {}", checkMessage.getMessageId(), + new String(checkMessage.getData())); + } + assertNull(checkMessage); + + checkConsumer.close(); + } + //Issue 9327: do compatibility check in case of the default retry and dead letter topic name changed @Test public void testRetryTopicNameForCompatibility () throws Exception { @@ -348,4 +449,58 @@ public void testRetryTopicByCustomTopicName() throws Exception { checkConsumer.close(); } + + @Test(timeOut = 30000L) + public void testRetryTopicException() throws Exception { + final String topic = "persistent://my-property/my-ns/retry-topic"; + final int maxRedeliveryCount = 2; + final int sendMessages = 1; + // subscribe before publish + Consumer consumer = pulsarClient.newConsumer(Schema.BYTES) + .topic(topic) + 
.subscriptionName("my-subscription") + .subscriptionType(SubscriptionType.Shared) + .enableRetry(true) + .receiverQueueSize(100) + .deadLetterPolicy(DeadLetterPolicy.builder() + .maxRedeliverCount(maxRedeliveryCount) + .retryLetterTopic("persistent://my-property/my-ns/my-subscription-custom-Retry") + .build()) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + + Producer producer = pulsarClient.newProducer(Schema.BYTES) + .topic(topic) + .create(); + for (int i = 0; i < sendMessages; i++) { + producer.send(String.format("Hello Pulsar [%d]", i).getBytes()); + } + producer.close(); + + // mock a retry producer exception when reconsumelater is called + MultiTopicsConsumerImpl multiTopicsConsumer = (MultiTopicsConsumerImpl) consumer; + List> consumers = multiTopicsConsumer.getConsumers(); + for (ConsumerImpl c : consumers) { + Set deadLetterPolicyField = + ReflectionUtils.getAllFields(c.getClass(), ReflectionUtils.withName("deadLetterPolicy")); + + if (deadLetterPolicyField.size() != 0) { + Field field = deadLetterPolicyField.iterator().next(); + field.setAccessible(true); + DeadLetterPolicy deadLetterPolicy = (DeadLetterPolicy) field.get(c); + deadLetterPolicy.setRetryLetterTopic("#persistent://invlaid-topic#"); + } + } + Message message = consumer.receive(); + log.info("consumer received message : {} {}", message.getMessageId(), new String(message.getData())); + try { + consumer.reconsumeLater(message, 1, TimeUnit.SECONDS); + } catch (PulsarClientException.InvalidTopicNameException e) { + assertEquals(e.getClass(), PulsarClientException.InvalidTopicNameException.class); + } catch (Exception e) { + fail("exception should be PulsarClientException.InvalidTopicNameException"); + } + consumer.close(); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerStatTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerStatTest.java index 
cc83ba2c331ca..8f93494fab3b2 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerStatTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerStatTest.java @@ -33,7 +33,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import com.google.gson.Gson; import org.apache.pulsar.broker.stats.NamespaceStats; import org.apache.pulsar.client.admin.PulsarAdminException; import org.slf4j.Logger; @@ -46,6 +45,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.google.common.util.concurrent.RateLimiter; +import com.google.gson.Gson; import com.google.gson.JsonArray; import com.google.gson.JsonObject; @@ -76,6 +76,11 @@ public Object[][] ackTimeoutSecProvider() { return new Object[][] { { 0, 0 }, { 0, 2 }, { 1000, 0 }, { 1000, 2 } }; } + @DataProvider(name = "batchingEnabled") + public Object[][] batchingEnabled() { + return new Object[][] { { true }, { false } }; + } + @Test(dataProvider = "batch_with_timeout") public void testSyncProducerAndConsumer(int batchMessageDelayMs, int ackTimeoutSec) throws Exception { log.info("-- Starting {} test --", methodName); @@ -335,6 +340,7 @@ public void testSendTimeout(int batchMessageDelayMs) throws Exception { log.info("-- Exiting {} test --", methodName); } + @Test public void testBatchMessagesRateOut() throws PulsarClientException, InterruptedException, PulsarAdminException { log.info("-- Starting {} test --", methodName); String topicName = "persistent://my-property/cluster/my-ns/testBatchMessagesRateOut"; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerTest.java index 2eebbf8a0dc1b..555191ae59988 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleProducerConsumerTest.java @@ -39,6 +39,7 @@ import com.google.common.collect.Sets; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; +import io.netty.util.Timeout; import java.io.ByteArrayInputStream; import java.io.IOException; import java.lang.reflect.Field; @@ -59,6 +60,7 @@ import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; @@ -77,6 +79,7 @@ import org.apache.bookkeeper.common.concurrent.FutureUtils; import org.apache.bookkeeper.mledger.impl.EntryCacheImpl; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.commons.lang3.RandomUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.admin.PulsarAdminException; @@ -87,6 +90,7 @@ import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.client.impl.MessageImpl; import org.apache.pulsar.client.impl.MultiTopicsConsumerImpl; +import org.apache.pulsar.client.impl.PartitionedProducerImpl; import org.apache.pulsar.client.impl.TopicMessageImpl; import org.apache.pulsar.client.impl.TypedMessageBuilderImpl; import org.apache.pulsar.client.impl.crypto.MessageCryptoBc; @@ -145,6 +149,16 @@ public Object[][] ackReceiptEnabled() { return new Object[][] { { true }, { false } }; } + @DataProvider(name = "ackReceiptEnabledAndSubscriptionTypes") + public Object[][] ackReceiptEnabledAndSubscriptionTypes() { + return new Object[][] { + {true, SubscriptionType.Shared}, + {true, SubscriptionType.Key_Shared}, + {false, SubscriptionType.Shared}, + {false, SubscriptionType.Key_Shared}, + }; + } + @AfterMethod(alwaysRun = true) @Override protected void cleanup() throws Exception { 
@@ -606,6 +620,83 @@ public void testSendTimeout(int batchMessageDelayMs) throws Exception { log.info("-- Exiting {} test --", methodName); } + @Test(dataProvider = "batch") + public void testSendTimeoutAndRecover(int batchMessageDelayMs) throws Exception { + log.info("-- Starting {} test --", methodName); + + int numPartitions = 6; + TopicName topicName = TopicName.get("persistent://my-property/my-ns/sendTimeoutAndRecover-1"); + admin.topics().createPartitionedTopic(topicName.toString(), numPartitions); + + @Cleanup + Consumer consumer = pulsarClient.newConsumer().topic(topicName.toString()) + .subscriptionName("my-subscriber-name").subscribe(); + ProducerBuilder producerBuilder = pulsarClient.newProducer() + .topic(topicName.toString()).sendTimeout(1, TimeUnit.SECONDS); + + if (batchMessageDelayMs != 0) { + producerBuilder.enableBatching(true); + producerBuilder.batchingMaxPublishDelay(batchMessageDelayMs, TimeUnit.MILLISECONDS); + producerBuilder.batchingMaxMessages(5); + } + + @Cleanup + PartitionedProducerImpl partitionedProducer = + (PartitionedProducerImpl) producerBuilder.create(); + final String message = "my-message"; + // 1. Trigger the send timeout + stopBroker(); + + partitionedProducer.sendAsync(message.getBytes()); + + String exceptionMessage = ""; + try { + // 2. execute flush to get results, + // it should be failed because step 1 + partitionedProducer.flush(); + Assert.fail("Send operation should have failed"); + } catch (PulsarClientException e) { + exceptionMessage = e.getMessage(); + } + + // 3. execute flush to get results, + // it shouldn't fail because we already handled the exception in the step 2, unless we keep sending data. + partitionedProducer.flush(); + // 4. execute flushAsync, we only catch the exception once, + // but by getting the original lastSendFuture twice below, + // the same exception information must be caught twice to verify that our handleOnce works as expected. 
+ try { + partitionedProducer.getOriginalLastSendFuture().get(); + Assert.fail("Send operation should have failed"); + } catch (Exception e) { + Assert.assertEquals(PulsarClientException.unwrap(e).getMessage(), exceptionMessage); + } + try { + partitionedProducer.getOriginalLastSendFuture().get(); + Assert.fail("Send operation should have failed"); + } catch (Exception e) { + Assert.assertEquals(PulsarClientException.unwrap(e).getMessage(), exceptionMessage); + } + + startBroker(); + + // 5. We should not have received any message + Message msg = consumer.receive(RECEIVE_TIMEOUT_SECONDS, TimeUnit.SECONDS); + Assert.assertNull(msg); + + // 6. We keep sending data after connection reconnected. + partitionedProducer.sendAsync(message.getBytes()); + // 7. This flush operation must succeed. + partitionedProducer.flush(); + + // 8. We should have received message + msg = consumer.receive(RECEIVE_TIMEOUT_SECONDS, TimeUnit.SECONDS); + Assert.assertNotNull(msg); + Assert.assertEquals(new String(msg.getData()), message); + + log.info("-- Exiting {} test --", methodName); + } + @Test public void testInvalidSequence() throws Exception { log.info("-- Starting {} test --", methodName); @@ -739,10 +830,11 @@ public void testSillyUser() { // This is to test that the flow control counter doesn't get corrupted while concurrent receives during // reconnections - @Test(dataProvider = "batch", groups = "quarantine") + @Test(timeOut = 100_000, dataProvider = "batch", groups = "quarantine") public void testConcurrentConsumerReceiveWhileReconnect(int batchMessageDelayMs) throws Exception { final int recvQueueSize = 100; final int numConsumersThreads = 10; + final int receiveTimeoutSeconds = 100; String subName = UUID.randomUUID().toString(); final Consumer consumer = pulsarClient.newConsumer() @@ -756,12 +848,11 @@ public void testConcurrentConsumerReceiveWhileReconnect(int batchMessageDelayMs) for (int i = 0; i < numConsumersThreads; i++) { executor.submit((Callable) () -> { 
barrier.await(); - consumer.receive(RECEIVE_TIMEOUT_SECONDS, TimeUnit.SECONDS); + consumer.receive(receiveTimeoutSeconds, TimeUnit.SECONDS); return null; }); } - - barrier.await(); + barrier.await(); // the last thread reach barrier, start consume messages // we restart the broker to reconnect restartBroker(); @@ -797,7 +888,7 @@ public void testConcurrentConsumerReceiveWhileReconnect(int batchMessageDelayMs) return null; }); } - barrier.await(); + barrier.await(); // the last thread reach barrier, start consume messages Awaitility.await().untilAsserted(() -> { // The available permits should be 20 and num messages in the queue should be 80 @@ -827,7 +918,7 @@ public void testConcurrentConsumerReceiveWhileReconnect(int batchMessageDelayMs) return null; }); } - barrier.await(); + barrier.await(); // the last thread reach barrier, start consume messages restartBroker(); @@ -1515,6 +1606,47 @@ public void testConsumerBlockingWithUnAckedMessagesMultipleIteration(boolean ack } } + @Test(dataProvider = "ackReceiptEnabledAndSubscriptionTypes") + public void testMaxUnAckMessagesLowerThanPermits(boolean ackReceiptEnabled, SubscriptionType subType) + throws PulsarClientException { + final int maxUnacks = 10; + pulsar.getConfiguration().setMaxUnackedMessagesPerConsumer(maxUnacks); + final String topic = "persistent://my-property/my-ns/testMaxUnAckMessagesLowerThanPermits"; + + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topic).subscriptionName("sub") + .subscriptionType(subType) + .isAckReceiptEnabled(ackReceiptEnabled) + .acknowledgmentGroupTime(0, TimeUnit.SECONDS) + .subscribe(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .enableBatching(false) + .topic(topic) + .create(); + + final int messages = 1000; + for (int i = 0; i < messages; i++) { + producer.sendAsync("Message - " + i); + } + producer.flush(); + List receives = new ArrayList<>(); + for (int i = 0; i < maxUnacks; i++) { + Message received = 
consumer.receive(); + log.info("Received message {} with message ID {}", received.getValue(), received.getMessageId()); + receives.add(received.getMessageId()); + } + assertNull(consumer.receive(3, TimeUnit.SECONDS)); + consumer.acknowledge(receives); + for (int i = 0; i < messages - maxUnacks; i++) { + Message received = consumer.receive(); + log.info("Received message {} with message ID {}", received.getValue(), received.getMessageId()); + consumer.acknowledge(received); + } + } + /** * Verify: Consumer1 which doesn't send ack will not impact Consumer2 which sends ack for consumed message. * @@ -2360,7 +2492,7 @@ public void testRedeliveryFailOverConsumer(boolean ackReceiptEnabled) throws Exc Message msg; List> messages1 = Lists.newArrayList(); for (int i = 0; i < consumeMsgInParts; i++) { - msg = consumer.receive(RECEIVE_TIMEOUT_SECONDS, TimeUnit.SECONDS); + msg = consumer.receive(); if (msg != null) { messages1.add(msg); consumer.acknowledge(msg); @@ -2376,7 +2508,7 @@ public void testRedeliveryFailOverConsumer(boolean ackReceiptEnabled) throws Exc // (1.b) consume second consumeMsgInParts msgs and trigger redeliver messages1.clear(); for (int i = 0; i < consumeMsgInParts; i++) { - msg = consumer.receive(RECEIVE_TIMEOUT_SECONDS, TimeUnit.SECONDS); + msg = consumer.receive(); if (msg != null) { messages1.add(msg); consumer.acknowledge(msg); @@ -2399,7 +2531,7 @@ public void testRedeliveryFailOverConsumer(boolean ackReceiptEnabled) throws Exc int remainingMsgs = (2 * receiverQueueSize) - (2 * consumeMsgInParts); messages1.clear(); for (int i = 0; i < remainingMsgs; i++) { - msg = consumer.receive(RECEIVE_TIMEOUT_SECONDS, TimeUnit.SECONDS); + msg = consumer.receive(); if (msg != null) { messages1.add(msg); consumer.acknowledge(msg); @@ -2620,6 +2752,38 @@ public EncryptionKeyInfo getPrivateKey(String keyName, Map keyMe log.info("-- Exiting {} test --", methodName); } + @Test + public void testCryptoWithChunking() throws Exception { + final String topic = 
"persistent://my-property/my-ns/testCryptoWithChunking" + System.currentTimeMillis(); + final String ecdsaPublicKeyFile = "file:./src/test/resources/certificate/public-key.client-ecdsa.pem"; + final String ecdsaPrivateKeyFile = "file:./src/test/resources/certificate/private-key.client-ecdsa.pem"; + + this.conf.setMaxMessageSize(1000); + + @Cleanup + PulsarClient pulsarClient = newPulsarClient(lookupUrl.toString(), 0); + + @Cleanup + Consumer consumer1 = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1") + .defaultCryptoKeyReader(ecdsaPrivateKeyFile).subscribe(); + @Cleanup + Producer producer1 = pulsarClient.newProducer().topic(topic) + .enableChunking(true) + .enableBatching(false) + .addEncryptionKey("client-ecdsa.pem") + .defaultCryptoKeyReader(ecdsaPublicKeyFile) + .create(); + + byte[] data = RandomUtils.nextBytes(5100); + MessageId id = producer1.send(data); + log.info("Message Id={}", id); + + MessageImpl message; + message = (MessageImpl) consumer1.receive(); + Assert.assertEquals(message.getData(), data); + Assert.assertEquals(message.getEncryptionCtx().get().getKeys().size(), 1); + } + @Test public void testDefaultCryptoKeyReader() throws Exception { final String topic = "persistent://my-property/my-ns/default-crypto-key-reader" + System.currentTimeMillis(); @@ -4247,4 +4411,81 @@ public void testShareConsumerWithMessageListener() throws Exception { assertEquals(resultSet.size(), total); }); } + + @Test + public void testPartitionsAutoUpdate() throws Exception { + log.info("-- Starting {} test --", methodName); + + int numPartitions = 3; + TopicName topicName = TopicName.get("persistent://my-property/my-ns/partitionsAutoUpdate-1"); + admin.topics().createPartitionedTopic(topicName.toString(), numPartitions); + + int operationTimeout = 2000; // MILLISECONDS + @Cleanup final PulsarClient client = PulsarClient.builder() + .serviceUrl(lookupUrl.toString()) + .operationTimeout(operationTimeout, TimeUnit.MILLISECONDS) + .build(); + + 
ProducerBuilder producerBuilder = client.newProducer() + .topic(topicName.toString()).sendTimeout(1, TimeUnit.SECONDS); + + @Cleanup + PartitionedProducerImpl partitionedProducer = + (PartitionedProducerImpl) producerBuilder.autoUpdatePartitions(true).create(); + + // Trigger the Connection refused exception + stopBroker(); + + log.info("trigger partitionsAutoUpdateTimerTask run failed for producer"); + Timeout timeout = partitionedProducer.getPartitionsAutoUpdateTimeout(); + timeout.task().run(timeout); + Awaitility.await().untilAsserted(() -> { + assertNotNull(partitionedProducer.getPartitionsAutoUpdateFuture()); + assertTrue(partitionedProducer.getPartitionsAutoUpdateFuture().isCompletedExceptionally()); + assertTrue(FutureUtil.getException(partitionedProducer.getPartitionsAutoUpdateFuture()).get().getMessage() + .contains("Connection refused:")); + }); + + startBroker(); + + log.info("trigger partitionsAutoUpdateTimerTask run successful for producer"); + timeout = partitionedProducer.getPartitionsAutoUpdateTimeout(); + timeout.task().run(timeout); + Awaitility.await().untilAsserted(() -> { + assertNotNull(partitionedProducer.getPartitionsAutoUpdateFuture()); + assertTrue(partitionedProducer.getPartitionsAutoUpdateFuture().isDone()); + assertFalse(partitionedProducer.getPartitionsAutoUpdateFuture().isCompletedExceptionally()); + }); + + log.info("-- Exiting {} test --", methodName); + } + + @Test(invocationCount = 5) + public void testListenerOrdering() throws Exception { + final String topic = "persistent://my-property/my-ns/test-listener-ordering-" + System.currentTimeMillis(); + final int numMessages = 1000; + final CountDownLatch latch = new CountDownLatch(numMessages); + final List values = new CopyOnWriteArrayList<>(); + final Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("sub") + .messageListener((MessageListener) (consumer1, msg) -> { + values.add(msg.getValue()); + latch.countDown(); + }) + .subscribe(); 
+ final Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .create(); + for (int i = 0; i < numMessages; i++) { + producer.send("msg-" + i); + } + latch.await(3, TimeUnit.SECONDS); + producer.close(); + consumer.close(); + assertEquals(values.size(), numMessages); + for (int i = 0; i < numMessages; i++) { + assertEquals(values.get(i), "msg-" + i); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleSchemaTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleSchemaTest.java index fd8036eaf9e3a..cb8b8728ceee8 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleSchemaTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/SimpleSchemaTest.java @@ -18,12 +18,6 @@ */ package org.apache.pulsar.client.api; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Cleanup; -import lombok.Data; -import lombok.NoArgsConstructor; - import static java.nio.charset.StandardCharsets.UTF_8; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotEquals; @@ -31,13 +25,22 @@ import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - +import java.io.ByteArrayInputStream; +import java.io.EOFException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Cleanup; +import lombok.Data; +import lombok.NoArgsConstructor; import lombok.extern.slf4j.Slf4j; -import org.apache.avro.reflect.ReflectData; import org.apache.avro.Schema.Parser; -import org.apache.pulsar.client.impl.MessageImpl; -import org.apache.pulsar.client.impl.schema.KeyValueSchemaImpl; -import org.apache.pulsar.common.schema.LongSchemaVersion; +import 
org.apache.avro.reflect.ReflectData; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.PulsarClientException.IncompatibleSchemaException; import org.apache.pulsar.client.api.PulsarClientException.InvalidMessageException; @@ -45,13 +48,16 @@ import org.apache.pulsar.client.impl.BinaryProtoLookupService; import org.apache.pulsar.client.impl.HttpLookupService; import org.apache.pulsar.client.impl.LookupService; +import org.apache.pulsar.client.impl.MessageImpl; import org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.client.impl.schema.KeyValueSchemaImpl; +import org.apache.pulsar.client.impl.schema.SchemaInfoImpl; import org.apache.pulsar.client.impl.schema.reader.AvroReader; import org.apache.pulsar.client.impl.schema.writer.AvroWriter; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.protocol.schema.SchemaVersion; import org.apache.pulsar.common.schema.KeyValue; import org.apache.pulsar.common.schema.KeyValueEncodingType; +import org.apache.pulsar.common.schema.LongSchemaVersion; import org.apache.pulsar.common.schema.SchemaInfo; import org.apache.pulsar.common.schema.SchemaType; import org.testng.Assert; @@ -61,14 +67,6 @@ import org.testng.annotations.Factory; import org.testng.annotations.Test; -import java.io.ByteArrayInputStream; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; - @Test(groups = "broker-api") @Slf4j public class SimpleSchemaTest extends ProducerConsumerBase { @@ -305,7 +303,13 @@ public void newProducerForMessageSchemaOnTopicWithMultiVersionSchema() throws Ex + " if SchemaValidationEnabled is enabled"); } Message msg3 = c.receive(); - Assert.assertEquals(msg3.getSchemaVersion(), SchemaVersion.Empty.bytes()); + assertNull(msg3.getSchemaVersion()); + try { + msg3.getValue(); + fail("Schema should be 
incompatible"); + } catch (SchemaSerializationException e) { + assertTrue(e.getCause() instanceof EOFException); + } } catch (PulsarClientException e) { if (schemaValidationEnforced) { Assert.assertTrue(e instanceof IncompatibleSchemaException); @@ -366,7 +370,13 @@ public void newNativeAvroProducerForMessageSchemaOnTopicWithMultiVersionSchema() + " if SchemaValidationEnabled is enabled"); } Message msg3 = c.receive(); - Assert.assertEquals(msg3.getSchemaVersion(), SchemaVersion.Empty.bytes()); + assertNull(msg3.getSchemaVersion()); + try { + msg3.getValue(); + fail("Schema should be incompatible"); + } catch (SchemaSerializationException e) { + assertTrue(e.getCause() instanceof EOFException); + } } catch (PulsarClientException e) { if (schemaValidationEnforced) { Assert.assertTrue(e instanceof IncompatibleSchemaException); @@ -443,6 +453,9 @@ public void newProducerForMessageSchemaOnTopicInitialWithNoSchema() throws Excep } List allSchemas = admin.schemas().getAllSchemas(topic); + allSchemas.forEach(schemaInfo -> { + ((SchemaInfoImpl)schemaInfo).setTimestamp(0); + }); Assert.assertEquals(allSchemas, Arrays.asList(v1Schema.getSchemaInfo(), v2Schema.getSchemaInfo())); } @@ -481,6 +494,9 @@ public void newNativeAvroProducerForMessageSchemaOnTopicInitialWithNoSchema() th } List allSchemas = admin.schemas().getAllSchemas(topic); + allSchemas.forEach(schemaInfo -> { + ((SchemaInfoImpl)schemaInfo).setTimestamp(0); + }); Assert.assertEquals(allSchemas, Arrays.asList(v1Schema.getSchemaInfo(), v2Schema.getSchemaInfo())); } @@ -1253,4 +1269,38 @@ public void testAutoKeyValueConsumeGenericObjectNullValues(KeyValueEncodingType } } + + @Test + public void testConsumeAvroMessagesWithoutSchema() throws Exception { + if (schemaValidationEnforced) { + return; + } + final String topic = "test-consume-avro-messages-without-schema-" + UUID.randomUUID(); + final Schema schema = Schema.AVRO(V1Data.class); + final Consumer consumer = pulsarClient.newConsumer(schema) + .topic(topic) + 
.subscriptionName("sub") + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + final Producer producer = pulsarClient.newProducer() + .topic(topic) + .create(); + + final int numMessages = 5; + for (int i = 0; i < numMessages; i++) { + producer.send(schema.encode(new V1Data(i))); + } + + for (int i = 0; i < numMessages; i++) { + final Message msg = consumer.receive(3, TimeUnit.SECONDS); + assertNotNull(msg); + log.info("Received {} from {}", msg.getValue().i, topic); + assertEquals(msg.getValue().i, i); + assertEquals(msg.getReaderSchema().orElse(Schema.BYTES).getSchemaInfo(), schema.getSchemaInfo()); + consumer.acknowledge(msg); + } + + producer.close(); + consumer.close(); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/v1/V1_ProducerConsumerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/v1/V1_ProducerConsumerTest.java index 55c120592e1fc..e4cb941c650ca 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/api/v1/V1_ProducerConsumerTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/api/v1/V1_ProducerConsumerTest.java @@ -1708,7 +1708,7 @@ public void testBlockUnackedConsumerRedeliverySpecificMessagesCloseConsumerWhile } // client should not receive all produced messages and should be blocked due to unack-messages - assertEquals(messages1.size(), receiverQueueSize); + assertEquals(messages1.size(), unAckedMessagesBufferSize); Set redeliveryMessages = messages1.stream().map(m -> { return (MessageIdImpl) m.getMessageId(); }).collect(Collectors.toSet()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/BrokerClientIntegrationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/BrokerClientIntegrationTest.java index 8e385a62b1b9b..911a23b1bb8cd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/BrokerClientIntegrationTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/BrokerClientIntegrationTest.java @@ -20,6 +20,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.UUID.randomUUID; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.Mockito.any; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.doAnswer; @@ -69,6 +70,8 @@ import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.namespace.OwnershipCache; import org.apache.pulsar.broker.resources.BaseResources; +import org.apache.pulsar.broker.service.AbstractDispatcherSingleActiveConsumer; +import org.apache.pulsar.broker.service.ServerCnx; import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentTopic; import org.apache.pulsar.client.admin.PulsarAdminException; @@ -820,8 +823,9 @@ public void testAvroSchemaProducerConsumerWithSpecifiedReaderAndWriter() throws public void testJsonSchemaProducerConsumerWithSpecifiedReaderAndWriter() throws PulsarClientException { final String topicName = "persistent://my-property/my-ns/my-topic1"; ObjectMapper mapper = new ObjectMapper(); - SchemaReader reader = Mockito.spy(new JacksonJsonReader<>(mapper, TestMessageObject.class)); - SchemaWriter writer = Mockito.spy(new JacksonJsonWriter<>(mapper)); + SchemaReader reader = + spyWithClassAndConstructorArgs(JacksonJsonReader.class, mapper, TestMessageObject.class); + SchemaWriter writer = spyWithClassAndConstructorArgs(JacksonJsonWriter.class, mapper); SchemaDefinition schemaDefinition = new SchemaDefinitionBuilderImpl() .withPojo(TestMessageObject.class) @@ -1005,4 +1009,56 @@ public void testConsumerWithPooledMessagesWithReader(boolean isBatchingEnabled) reader.close(); producer.close(); } + + @Test + public void testActiveConsumerCleanup() throws Exception { + log.info("-- Starting {} test --", methodName); + + int 
numMessages = 100; + final CountDownLatch latch = new CountDownLatch(numMessages); + String topic = "persistent://my-property/my-ns/closed-cnx-topic"; + String sub = "my-subscriber-name"; + + PulsarClient pulsarClient = newPulsarClient(lookupUrl.toString(), 0); + pulsarClient.newConsumer().topic(topic).subscriptionName(sub).messageListener((c1, msg) -> { + Assert.assertNotNull(msg, "Message cannot be null"); + String receivedMessage = new String(msg.getData()); + log.debug("Received message [{}] in the listener", receivedMessage); + c1.acknowledgeAsync(msg); + latch.countDown(); + }).subscribe(); + + PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topic).get(); + + AbstractDispatcherSingleActiveConsumer dispatcher = (AbstractDispatcherSingleActiveConsumer) topicRef + .getSubscription(sub).getDispatcher(); + ServerCnx cnx = (ServerCnx) dispatcher.getActiveConsumer().cnx(); + Field field = ServerCnx.class.getDeclaredField("isActive"); + field.setAccessible(true); + field.set(cnx, false); + + assertNotNull(dispatcher.getActiveConsumer()); + + pulsarClient = newPulsarClient(lookupUrl.toString(), 0); + Consumer consumer = null; + for (int i = 0; i < 2; i++) { + try { + consumer = pulsarClient.newConsumer().topic(topic).subscriptionName(sub).messageListener((c1, msg) -> { + Assert.assertNotNull(msg, "Message cannot be null"); + String receivedMessage = new String(msg.getData()); + log.debug("Received message [{}] in the listener", receivedMessage); + c1.acknowledgeAsync(msg); + latch.countDown(); + }).subscribe(); + if (i == 0) { + fail("Should failed with ConsumerBusyException!"); + } + } catch (PulsarClientException.ConsumerBusyException ignore) { + // It's ok. 
+ } + } + assertNotNull(consumer); + log.info("-- Exiting {} test --", methodName); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ClientCnxTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ClientCnxTest.java new file mode 100644 index 0000000000000..7f2d11fd3adf5 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ClientCnxTest.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.impl; + +import com.google.common.collect.Sets; +import io.netty.channel.ChannelHandlerContext; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.awaitility.Awaitility; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +@Test(groups = "broker-impl") +public class ClientCnxTest extends MockedPulsarServiceBaseTest { + + public static final String CLUSTER_NAME = "test"; + public static final String TENANT = "tnx"; + public static final String NAMESPACE = TENANT + "/ns1"; + public static String persistentTopic = "persistent://" + NAMESPACE + "/test"; + ExecutorService executorService = Executors.newFixedThreadPool(20); + + @BeforeClass + @Override + protected void setup() throws Exception { + super.internalSetup(); + admin.clusters().createCluster(CLUSTER_NAME, ClusterData.builder() + .serviceUrl(pulsar.getWebServiceAddress()).build()); + admin.tenants().createTenant(TENANT, + new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); + admin.namespaces().createNamespace(NAMESPACE); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + this.executorService.shutdown(); + } + + @Test + public void testRemoveAndHandlePendingRequestInCnx() throws Exception { + + String subName = "sub"; + int operationTimes = 5000; + CountDownLatch countDownLatch = new CountDownLatch(operationTimes); + + Consumer consumer = pulsarClient.newConsumer() + .topic(persistentTopic) + .subscriptionName(subName) + .subscribe(); + + new Thread(() -> { + for (int i = 0; i < 
operationTimes; i++) { + executorService.submit(() -> { + consumer.getLastMessageIdAsync().whenComplete((ignore, exception) -> { + countDownLatch.countDown(); + }); + }); + } + }).start(); + + for (int i = 0; i < operationTimes; i++) { + ClientCnx cnx = ((ConsumerImpl) consumer).getClientCnx(); + if (cnx != null) { + ChannelHandlerContext context = cnx.ctx(); + if (context != null) { + cnx.ctx().close(); + } + } + } + + Awaitility.await().until(() -> { + countDownLatch.await(); + return true; + }); + + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionPoolTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionPoolTest.java index 1e97550322b67..30583bb64cda3 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionPoolTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConnectionPoolTest.java @@ -21,6 +21,10 @@ import com.google.common.collect.Lists; import io.netty.channel.EventLoopGroup; import io.netty.util.concurrent.DefaultThreadFactory; +import java.net.InetSocketAddress; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.stream.IntStream; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; import org.apache.pulsar.common.util.netty.EventLoopUtil; @@ -30,22 +34,21 @@ import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.stream.IntStream; + +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; @Test(groups = "broker-impl") public class ConnectionPoolTest extends MockedPulsarServiceBaseTest { String serviceUrl; + int brokerPort; @BeforeClass @Override protected void setup() throws Exception { super.internalSetup(); - serviceUrl = 
"pulsar://non-existing-dns-name:" + pulsar.getBrokerListenPort().get(); + brokerPort = pulsar.getBrokerListenPort().get(); + serviceUrl = "pulsar://non-existing-dns-name:" + brokerPort; } @AfterClass(alwaysRun = true) @@ -58,13 +61,15 @@ protected void cleanup() throws Exception { public void testSingleIpAddress() throws Exception { ClientConfigurationData conf = new ClientConfigurationData(); EventLoopGroup eventLoop = EventLoopUtil.newEventLoopGroup(1, false, new DefaultThreadFactory("test")); - ConnectionPool pool = Mockito.spy(new ConnectionPool(conf, eventLoop)); + ConnectionPool pool = spyWithClassAndConstructorArgs(ConnectionPool.class, conf, eventLoop); conf.setServiceUrl(serviceUrl); PulsarClientImpl client = new PulsarClientImpl(conf, eventLoop, pool); - List result = Lists.newArrayList(); - result.add(InetAddress.getByName("127.0.0.1")); - Mockito.when(pool.resolveName("non-existing-dns-name")).thenReturn(CompletableFuture.completedFuture(result)); + List result = Lists.newArrayList(); + result.add(new InetSocketAddress("127.0.0.1", brokerPort)); + Mockito.when(pool.resolveName(InetSocketAddress.createUnresolved("non-existing-dns-name", + brokerPort))) + .thenReturn(CompletableFuture.completedFuture(result)); client.newProducer().topic("persistent://sample/standalone/ns/my-topic").create(); @@ -74,20 +79,20 @@ public void testSingleIpAddress() throws Exception { @Test public void testDoubleIpAddress() throws Exception { - String serviceUrl = "pulsar://non-existing-dns-name:" + pulsar.getBrokerListenPort().get(); - ClientConfigurationData conf = new ClientConfigurationData(); EventLoopGroup eventLoop = EventLoopUtil.newEventLoopGroup(1, false, new DefaultThreadFactory("test")); - ConnectionPool pool = Mockito.spy(new ConnectionPool(conf, eventLoop)); + ConnectionPool pool = spyWithClassAndConstructorArgs(ConnectionPool.class, conf, eventLoop); conf.setServiceUrl(serviceUrl); PulsarClientImpl client = new PulsarClientImpl(conf, eventLoop, pool); - List 
result = Lists.newArrayList(); + List result = Lists.newArrayList(); // Add a non existent IP to the response to check that we're trying the 2nd address as well - result.add(InetAddress.getByName("127.0.0.99")); - result.add(InetAddress.getByName("127.0.0.1")); - Mockito.when(pool.resolveName("non-existing-dns-name")).thenReturn(CompletableFuture.completedFuture(result)); + result.add(new InetSocketAddress("127.0.0.99", brokerPort)); + result.add(new InetSocketAddress("127.0.0.1", brokerPort)); + Mockito.when(pool.resolveName(InetSocketAddress.createUnresolved("non-existing-dns-name", + brokerPort))) + .thenReturn(CompletableFuture.completedFuture(result)); // Create producer should succeed by trying the 2nd IP client.newProducer().topic("persistent://sample/standalone/ns/my-topic").create(); @@ -101,10 +106,10 @@ public void testNoConnectionPool() throws Exception { ClientConfigurationData conf = new ClientConfigurationData(); conf.setConnectionsPerBroker(0); EventLoopGroup eventLoop = EventLoopUtil.newEventLoopGroup(8, false, new DefaultThreadFactory("test")); - ConnectionPool pool = Mockito.spy(new ConnectionPool(conf, eventLoop)); + ConnectionPool pool = spyWithClassAndConstructorArgs(ConnectionPool.class, conf, eventLoop); InetSocketAddress brokerAddress = - InetSocketAddress.createUnresolved("127.0.0.1", pulsar.getBrokerListenPort().get()); + InetSocketAddress.createUnresolved("127.0.0.1", brokerPort); IntStream.range(1, 5).forEach(i -> { pool.getConnection(brokerAddress).thenAccept(cnx -> { Assert.assertTrue(cnx.channel().isActive()); @@ -123,10 +128,10 @@ public void testEnableConnectionPool() throws Exception { ClientConfigurationData conf = new ClientConfigurationData(); conf.setConnectionsPerBroker(5); EventLoopGroup eventLoop = EventLoopUtil.newEventLoopGroup(8, false, new DefaultThreadFactory("test")); - ConnectionPool pool = Mockito.spy(new ConnectionPool(conf, eventLoop)); + ConnectionPool pool = spyWithClassAndConstructorArgs(ConnectionPool.class, 
conf, eventLoop); InetSocketAddress brokerAddress = - InetSocketAddress.createUnresolved("127.0.0.1", pulsar.getBrokerListenPort().get()); + InetSocketAddress.createUnresolved("127.0.0.1", brokerPort); IntStream.range(1, 10).forEach(i -> { pool.getConnection(brokerAddress).thenAccept(cnx -> { Assert.assertTrue(cnx.channel().isActive()); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConsumerAckResponseTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConsumerAckResponseTest.java index 0378c53d05be6..698186539b3c6 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConsumerAckResponseTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConsumerAckResponseTest.java @@ -50,8 +50,10 @@ public void setup() throws Exception { super.producerBaseSetup(); doReturn(1L).when(transaction).getTxnIdLeastBits(); doReturn(1L).when(transaction).getTxnIdMostBits(); + doReturn(TransactionImpl.State.OPEN).when(transaction).getState(); CompletableFuture completableFuture = CompletableFuture.completedFuture(null); doNothing().when(transaction).registerAckOp(any()); + doReturn(true).when(transaction).checkIfOpen(any()); doReturn(completableFuture).when(transaction).registerAckedTopic(any(), any()); Thread.sleep(1000 * 3); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConsumerDedupPermitsUpdateTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConsumerDedupPermitsUpdateTest.java index 4c9922acbec06..ceb7d7fd4844c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConsumerDedupPermitsUpdateTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ConsumerDedupPermitsUpdateTest.java @@ -116,10 +116,23 @@ public void testConsumerDedup(boolean batchingEnabled, int receiverQueueSize) th } producer.flush(); - for (int i = 0; i < 30; i++) { - Message msg = consumer.receive(); - assertEquals(msg.getValue(), "new-message-" + i); - 
consumer.acknowledge(msg); + if (batchingEnabled) { + for (int i = 0; i < 30; i++) { + Message msg = consumer.receive(); + assertEquals(msg.getValue(), "hello-" + i); + consumer.acknowledge(msg); + } + for (int i = 0; i < 30; i++) { + Message msg = consumer.receive(); + assertEquals(msg.getValue(), "new-message-" + i); + consumer.acknowledge(msg); + } + } else { + for (int i = 0; i < 30; i++) { + Message msg = consumer.receive(); + assertEquals(msg.getValue(), "new-message-" + i); + consumer.acknowledge(msg); + } } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/KeySharedSubscriptionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/KeySharedSubscriptionTest.java new file mode 100644 index 0000000000000..36d3bf6c44076 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/KeySharedSubscriptionTest.java @@ -0,0 +1,189 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.impl; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import lombok.Cleanup; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.SubscriptionType; +import org.awaitility.Awaitility; +import org.testng.Assert; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +@Test(groups = "broker-impl") +public class KeySharedSubscriptionTest extends ProducerConsumerBase { + + @Override + @BeforeMethod + protected void setup() throws Exception { + conf.setMaxUnackedMessagesPerConsumer(10); + super.internalSetup(); + super.producerBaseSetup(); + } + + @Override + @AfterMethod(alwaysRun = true) + protected void cleanup() throws Exception { + super.internalCleanup(); + } + + @DataProvider + public Object[][] subType() { + return new Object[][] { { SubscriptionType.Shared }, { SubscriptionType.Key_Shared } }; + } + + @Test(dataProvider = "subType") + public void testCanRecoverConsumptionWhenLiftMaxUnAckedMessagesRestriction(SubscriptionType subscriptionType) + throws PulsarClientException { + PulsarClient pulsarClient = PulsarClient.builder(). 
+ serviceUrl(lookupUrl.toString()) + .build(); + final int totalMsg = 1000; + String topic = "broker-close-test-" + RandomStringUtils.randomAlphabetic(5); + Map, List> nameToId = Maps.newConcurrentMap(); + Set pubMessages = Sets.newConcurrentHashSet(); + Set recMessages = Sets.newConcurrentHashSet(); + AtomicLong lastActiveTime = new AtomicLong(); + AtomicBoolean canAcknowledgement = new AtomicBoolean(false); + + @Cleanup + Consumer consumer1 = pulsarClient.newConsumer() + .topic(topic) + .subscriptionName("sub-1") + .subscriptionType(subscriptionType) + .consumerName("con-1") + .messageListener((cons1, msg) -> { + lastActiveTime.set(System.currentTimeMillis()); + nameToId.computeIfAbsent(cons1,(k) -> new ArrayList<>()) + .add(msg.getMessageId()); + recMessages.add(msg.getMessageId()); + if (canAcknowledgement.get()) { + try { + cons1.acknowledge(msg); + } catch (PulsarClientException e) { + throw new RuntimeException(e); + } + } + }) + .subscribe(); + @Cleanup + Consumer consumer2 = pulsarClient.newConsumer() + .topic(topic) + .subscriptionName("sub-1") + .subscriptionType(subscriptionType) + .messageListener((cons2, msg) -> { + lastActiveTime.set(System.currentTimeMillis()); + nameToId.computeIfAbsent(cons2,(k) -> new ArrayList<>()) + .add(msg.getMessageId()); + recMessages.add(msg.getMessageId()); + if (canAcknowledgement.get()) { + try { + cons2.acknowledge(msg); + } catch (PulsarClientException e) { + throw new RuntimeException(e); + } + } + }) + .consumerName("con-2") + .subscribe(); + @Cleanup + Consumer consumer3 = pulsarClient.newConsumer() + .topic(topic) + .subscriptionName("sub-1") + .subscriptionType(subscriptionType) + .messageListener((cons3, msg) -> { + lastActiveTime.set(System.currentTimeMillis()); + nameToId.computeIfAbsent(cons3,(k) -> new ArrayList<>()) + .add(msg.getMessageId()); + recMessages.add(msg.getMessageId()); + if (canAcknowledgement.get()) { + try { + cons3.acknowledge(msg); + } catch (PulsarClientException e) { + throw new 
RuntimeException(e); + } + } + }) + .consumerName("con-3") + .subscribe(); + + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topic) + .enableBatching(true) + .batchingMaxPublishDelay(1, TimeUnit.MILLISECONDS) + // We chose 9 because the maximum unacked message is 10 + .batchingMaxMessages(9) + .create(); + + for (int i = 0; i < totalMsg; i++) { + producer.sendAsync(UUID.randomUUID().toString() + .getBytes(StandardCharsets.UTF_8)) + .thenAccept(pubMessages::add); + } + + // Wait for all consumers can not read more messages. the consumers are stuck by max unacked messages. + Awaitility.await() + .pollDelay(5, TimeUnit.SECONDS) + .until(() -> + (System.currentTimeMillis() - lastActiveTime.get()) > TimeUnit.SECONDS.toMillis(5)); + + // All consumers can acknowledge messages as they continue to receive messages. + canAcknowledgement.set(true); + + // Acknowledgment of currently received messages to get out of stuck state due to unack message + for (Map.Entry, List> entry : nameToId.entrySet()) { + Consumer consumer = entry.getKey(); + consumer.acknowledge(entry.getValue()); + } + // refresh active time + lastActiveTime.set(System.currentTimeMillis()); + + // Wait for all consumers to continue receiving messages. + Awaitility.await() + .pollDelay(5, TimeUnit.SECONDS) + .until(() -> + (System.currentTimeMillis() - lastActiveTime.get()) > TimeUnit.SECONDS.toMillis(5)); + + //Determine if all messages have been received. + //If the dispatcher is stuck, we can not receive enough messages. 
+ Assert.assertEquals(pubMessages.size(), totalMsg); + Assert.assertEquals(pubMessages.size(), recMessages.size()); + Assert.assertTrue(recMessages.containsAll(pubMessages)); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingTest.java index d4eab77a7c21f..fc935a2b7f688 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MessageChunkingTest.java @@ -20,6 +20,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import com.google.common.collect.Lists; @@ -27,17 +28,21 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import java.lang.reflect.Field; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Random; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import lombok.Cleanup; import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; +import org.apache.commons.lang3.RandomUtils; import org.apache.pulsar.broker.service.persistent.PersistentTopic; +import org.apache.pulsar.client.api.ClientBuilder; import org.apache.pulsar.client.api.CompressionType; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Message; @@ -47,6 +52,7 @@ import org.apache.pulsar.client.api.ProducerBuilder; import org.apache.pulsar.client.api.ProducerConsumerBase; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.SizeUnit; import 
org.apache.pulsar.client.impl.MessageImpl.SchemaState; import org.apache.pulsar.client.impl.ProducerImpl.OpSendMsg; import org.apache.pulsar.common.api.proto.MessageMetadata; @@ -57,6 +63,7 @@ import org.apache.pulsar.common.util.FutureUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.DataProvider; @@ -162,6 +169,29 @@ public void testLargeMessage(boolean ackReceiptEnabled) throws Exception { } + @Test + public void testChunkingWithOrderingKey() throws Exception { + this.conf.setMaxMessageSize(5); + + final String topicName = "persistent://my-property/my-ns/testChunkingWithOrderingKey"; + + @Cleanup + Consumer consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("my-subscriber-name") + .acknowledgmentGroupTime(0, TimeUnit.SECONDS).subscribe(); + + @Cleanup + Producer producer = pulsarClient.newProducer().topic(topicName).enableChunking(true) + .enableBatching(false).create(); + + byte[] data = RandomUtils.nextBytes(20); + byte[] ok = RandomUtils.nextBytes(10); + producer.newMessage().value(data).orderingKey(ok).send(); + + Message msg = consumer.receive(); + Assert.assertEquals(msg.getData(), data); + Assert.assertEquals(msg.getOrderingKey(), ok); + } + @Test(dataProvider = "ackReceiptEnabled") public void testLargeMessageAckTimeOut(boolean ackReceiptEnabled) throws Exception { @@ -369,6 +399,44 @@ public void testExpireIncompleteChunkMessage() throws Exception{ producer = null; // clean reference of mocked producer } + @Test + public void testChunksEnqueueFailed() throws Exception { + final String topicName = "persistent://my-property/my-ns/test-chunks-enqueue-failed"; + log.info("-- Starting {} test --", methodName); + this.conf.setMaxMessageSize(5); + + final MemoryLimitController controller = ((PulsarClientImpl) pulsarClient).getMemoryLimitController(); + 
assertEquals(controller.currentUsage(), 0); + + final int maxPendingMessages = 10; + + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topicName) + .maxPendingMessages(maxPendingMessages) + .enableChunking(true) + .enableBatching(false) + .create(); + assertTrue(producer instanceof ProducerImpl); + Semaphore semaphore = ((ProducerImpl) producer).getSemaphore().orElse(null); + assertNotNull(semaphore); + assertEquals(semaphore.availablePermits(), maxPendingMessages); + producer.send(createMessagePayload(1).getBytes()); + try { + producer.send(createMessagePayload(100).getBytes(StandardCharsets.UTF_8)); + fail("It should fail with ProducerQueueIsFullError"); + } catch (PulsarClientException e) { + assertTrue(e instanceof PulsarClientException.ProducerQueueIsFullError); + assertEquals(controller.currentUsage(), 0); + assertEquals(semaphore.availablePermits(), maxPendingMessages); + } + } + + @Override + protected void customizeNewPulsarClientBuilder(ClientBuilder clientBuilder) { + clientBuilder.memoryLimit(10000L, SizeUnit.BYTES); + } + private String createMessagePayload(int size) { StringBuilder str = new StringBuilder(); Random rand = new Random(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MultiTopicsReaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MultiTopicsReaderTest.java index 31a426e9f9dc8..6b6bf9594836a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MultiTopicsReaderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/MultiTopicsReaderTest.java @@ -26,15 +26,19 @@ import com.google.common.collect.Lists; import com.google.common.collect.Sets; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; +import 
java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.broker.service.StickyKeyConsumerSelector; import org.apache.pulsar.client.admin.PulsarAdminException; @@ -43,12 +47,12 @@ import org.apache.pulsar.client.api.MessageRoutingMode; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.ProducerBuilder; +import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Range; import org.apache.pulsar.client.api.Reader; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.ClusterDataImpl; import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; import org.apache.pulsar.common.policies.data.Policies; import org.apache.pulsar.common.policies.data.RetentionPolicies; @@ -57,16 +61,17 @@ import org.apache.pulsar.common.util.Murmur3_32Hash; import org.awaitility.Awaitility; import org.testng.Assert; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeMethod; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; +@Slf4j @Test(groups = "flaky") public class MultiTopicsReaderTest extends MockedPulsarServiceBaseTest { private static final String subscription = "reader-multi-topics-sub"; - @BeforeMethod(alwaysRun = true) + @BeforeClass(alwaysRun = true) @Override protected void setup() throws Exception { super.internalSetup(); @@ -82,7 +87,7 @@ protected void setup() throws Exception { admin.namespaces().createNamespace("my-property/my-ns", policies); } - @AfterMethod(alwaysRun = true) + @AfterClass(alwaysRun = true) @Override protected void cleanup() throws Exception { super.internalCleanup(); @@ 
-122,6 +127,67 @@ public void testReadMessageWithBatching() throws Exception { testReadMessages(topic, true); } + @Test(timeOut = 10000) + public void testHasMessageAvailableAsync() throws Exception { + String topic = "persistent://my-property/my-ns/testHasMessageAvailableAsync"; + String content = "my-message-"; + int msgNum = 10; + admin.topics().createPartitionedTopic(topic, 2); + // stop retention from cleaning up + pulsarClient.newConsumer().topic(topic).subscriptionName("sub1").subscribe().close(); + + try (Reader reader = pulsarClient.newReader().topic(topic).readCompacted(true) + .startMessageId(MessageId.earliest).create()) { + Assert.assertFalse(reader.hasMessageAvailable()); + Assert.assertFalse(reader.hasMessageAvailableAsync().get(10, TimeUnit.SECONDS)); + } + + try (Reader reader = pulsarClient.newReader() + .topic(topic).startMessageId(MessageId.earliest).create()) { + try (Producer producer = pulsarClient.newProducer().topic(topic).create()) { + for (int i = 0; i < msgNum; i++) { + producer.newMessage().key(content + i) + .value((content + i).getBytes(StandardCharsets.UTF_8)).send(); + } + } + // Should have message available + Assert.assertTrue(reader.hasMessageAvailableAsync().get()); + try { + // Should have message available too + Assert.assertTrue(reader.hasMessageAvailable()); + } catch (PulsarClientException e) { + fail("Expect success but failed.", e); + } + List> msgs = Collections.synchronizedList(new ArrayList<>()); + CountDownLatch latch = new CountDownLatch(1); + readMessageUseAsync(reader, msgs, latch); + latch.await(); + Assert.assertEquals(msgs.size(), msgNum); + } + } + + private static void readMessageUseAsync(Reader reader, List> msgs, CountDownLatch latch) { + reader.hasMessageAvailableAsync().thenAccept(hasMessageAvailable -> { + if (hasMessageAvailable) { + reader.readNextAsync().whenComplete((msg, ex) -> { + if (ex != null) { + log.error("Read message failed.", ex); + latch.countDown(); + return; + } + msgs.add(msg); + 
readMessageUseAsync(reader, msgs, latch); + }); + } else { + latch.countDown(); + } + }).exceptionally(throwable -> { + log.error("Read message failed.", throwable); + latch.countDown(); + return null; + }); + } + @Test(timeOut = 10000) public void testReadMessageWithBatchingWithMessageInclusive() throws Exception { String topic = "persistent://my-property/my-ns/my-reader-topic-with-batching-inclusive" + UUID.randomUUID(); @@ -378,6 +444,107 @@ public void testMultiTopic() throws Exception { Awaitility.await().untilAsserted(() -> assertEquals(client.consumersCount(), 0)); } + @Test(timeOut = 20000) + public void testMultiNonPartitionedTopicWithStartMessageId() throws Exception { + final String topic1 = "persistent://my-property/my-ns/topic1" + UUID.randomUUID(); + final String topic2 = "persistent://my-property/my-ns/topic2" + UUID.randomUUID(); + List topics = Arrays.asList(topic1, topic2); + PulsarClientImpl client = (PulsarClientImpl) pulsarClient; + + // create producer and send msg + List> producerList = new ArrayList<>(); + for (String topicName : topics) { + producerList.add(pulsarClient.newProducer(Schema.STRING).topic(topicName).create()); + } + int msgNum = 10; + Set messages = new HashSet<>(); + for (int i = 0; i < producerList.size(); i++) { + Producer producer = producerList.get(i); + for (int j = 0; j < msgNum; j++) { + String msg = i + "msg" + j; + producer.send(msg); + messages.add(msg); + } + } + Reader reader = pulsarClient.newReader(Schema.STRING) + .startMessageId(MessageId.earliest) + .topics(topics).readerName("my-reader").create(); + // receive messages + while (reader.hasMessageAvailable()) { + messages.remove(reader.readNext(5, TimeUnit.SECONDS).getValue()); + } + assertEquals(messages.size(), 0); + assertEquals(client.consumersCount(), 1); + // clean up + for (Producer producer : producerList) { + producer.close(); + } + reader.close(); + Awaitility.await().untilAsserted(() -> assertEquals(client.consumersCount(), 0)); + } + + 
@Test(timeOut = 20000) + public void testMultiNonPartitionedTopicWithRollbackDuration() throws Exception { + final String topic1 = "persistent://my-property/my-ns/topic1" + UUID.randomUUID(); + final String topic2 = "persistent://my-property/my-ns/topic2" + UUID.randomUUID(); + List topics = Arrays.asList(topic1, topic2); + PulsarClientImpl client = (PulsarClientImpl) pulsarClient; + + // create producer and send msg + List> producerList = new ArrayList<>(); + for (String topicName : topics) { + producerList.add(pulsarClient.newProducer(Schema.STRING).topic(topicName).create()); + } + int totalMsg = 10; + Set messages = new HashSet<>(); + long oldMsgPublishTime = System.currentTimeMillis() - TimeUnit.HOURS.toMillis(5); // 5 hours old + long newMsgPublishTime = System.currentTimeMillis() - TimeUnit.HOURS.toMillis(1); // 5 hours old + for (int i = 0; i < producerList.size(); i++) { + Producer producer = producerList.get(i); + // (1) Publish 10 messages with publish-time 5 HOUR back + for (int j = 0; j < totalMsg; j++) { + TypedMessageBuilderImpl msg = (TypedMessageBuilderImpl) producer.newMessage() + .value(i + "-old-msg-" + j); + msg.getMetadataBuilder() + .setPublishTime(oldMsgPublishTime) + .setProducerName(producer.getProducerName()) + .setReplicatedFrom("us-west1"); + msg.send(); + messages.add(msg.getMessage().getValue()); + } + // (2) Publish 10 messages with publish-time 1 HOUR back + for (int j = 0; j < totalMsg; j++) { + TypedMessageBuilderImpl msg = (TypedMessageBuilderImpl) producer.newMessage() + .value(i + "-new-msg-" + j); + msg.getMetadataBuilder() + .setPublishTime(newMsgPublishTime) + .setProducerName(producer.getProducerName()) + .setReplicatedFrom("us-west1"); + msg.send(); + messages.add(msg.getMessage().getValue()); + } + } + + Reader reader = pulsarClient.newReader(Schema.STRING) + .startMessageFromRollbackDuration(2, TimeUnit.HOURS) + .topics(topics).readerName("my-reader").create(); + // receive messages + while (reader.hasMessageAvailable()) 
{ + messages.remove(reader.readNext(5, TimeUnit.SECONDS).getValue()); + } + assertEquals(messages.size(), 2 * totalMsg); + for (String message : messages) { + assertTrue(message.contains("old-msg")); + } + assertEquals(client.consumersCount(), 1); + // clean up + for (Producer producer : producerList) { + producer.close(); + } + reader.close(); + Awaitility.await().untilAsserted(() -> assertEquals(client.consumersCount(), 0)); + } + @Test(timeOut = 10000) public void testKeyHashRangeReader() throws Exception { final List keys = Arrays.asList("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java index 5eb43af38f771..de130b7827078 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java @@ -23,6 +23,7 @@ import java.util.HashSet; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import lombok.Cleanup; @@ -35,6 +36,7 @@ import org.apache.pulsar.client.api.ProducerConsumerBase; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionType; +import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.DataProvider; @@ -154,4 +156,79 @@ public void testNegativeAcks(boolean batching, boolean usePartitions, Subscripti consumer.close(); producer.close(); } + + @Test + public void testFailoverConsumerBatchCumulateAck() throws Exception { + final String topic = BrokerTestUtil.newUniqueName("my-topic"); + admin.topics().createPartitionedTopic(topic, 2); + + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.INT32) + .topic(topic) + .subscriptionName("sub") + .subscriptionType(SubscriptionType.Failover) + 
.enableBatchIndexAcknowledgment(true) + .acknowledgmentGroupTime(100, TimeUnit.MILLISECONDS) + .receiverQueueSize(10) + .subscribe(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.INT32) + .topic(topic) + .batchingMaxMessages(10) + .batchingMaxPublishDelay(3, TimeUnit.SECONDS) + .blockIfQueueFull(true) + .create(); + + int count = 0; + Set datas = new HashSet<>(); + CountDownLatch producerLatch = new CountDownLatch(10); + while (count < 10) { + datas.add(count); + producer.sendAsync(count).whenComplete((m, e) -> { + producerLatch.countDown(); + }); + count++; + } + producerLatch.await(); + CountDownLatch consumerLatch = new CountDownLatch(1); + new Thread(new Runnable() { + @Override + public void run() { + consumer.receiveAsync() + .thenCompose(m -> { + log.info("received one msg : {}", m.getMessageId()); + datas.remove(m.getValue()); + return consumer.acknowledgeCumulativeAsync(m); + }) + .thenAccept(ignore -> { + try { + Thread.sleep(500); + consumer.redeliverUnacknowledgedMessages(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }) + .whenComplete((r, e) -> { + consumerLatch.countDown(); + }); + } + }).start(); + consumerLatch.await(); + Thread.sleep(500); + count = 0; + while(true) { + Message msg = consumer.receive(5, TimeUnit.SECONDS); + if (msg == null) { + break; + } + consumer.acknowledgeCumulative(msg); + Thread.sleep(200); + datas.remove(msg.getValue()); + log.info("received msg : {}", msg.getMessageId()); + count++; + } + Assert.assertEquals(count, 9); + Assert.assertEquals(0, datas.size()); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerMemoryLimitTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerMemoryLimitTest.java index 264ec306413cd..77e3ee811a714 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerMemoryLimitTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerMemoryLimitTest.java @@ 
-69,6 +69,35 @@ public void testProducerTimeoutMemoryRelease() throws Exception { } + @Test(timeOut = 10_000) + public void testProducerBatchSendTimeoutMemoryRelease() throws Exception { + initClientWithMemoryLimit(); + @Cleanup + ProducerImpl producer = (ProducerImpl) pulsarClient.newProducer() + .topic("testProducerMemoryLimit") + .sendTimeout(2, TimeUnit.SECONDS) + .maxPendingMessages(0) + .enableBatching(true) + .batchingMaxPublishDelay(3000, TimeUnit.MILLISECONDS) + .batchingMaxBytes(12) + .create(); + this.stopBroker(); + try { + producer.newMessage().value("memory-test".getBytes(StandardCharsets.UTF_8)).sendAsync(); + try { + producer.newMessage().value("memory-test".getBytes(StandardCharsets.UTF_8)).sendAsync().get(); + } catch (Exception e) { + throw PulsarClientException.unwrap(e); + } + + throw new IllegalStateException("can not reach here"); + } catch (PulsarClientException.TimeoutException ex) { + PulsarClientImpl clientImpl = (PulsarClientImpl) this.pulsarClient; + final MemoryLimitController memoryLimitController = clientImpl.getMemoryLimitController(); + Assert.assertEquals(memoryLimitController.currentUsage(), 0); + } + } + @Test(timeOut = 10_000) public void testProducerCloseMemoryRelease() throws Exception { initClientWithMemoryLimit(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerSemaphoreTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerSemaphoreTest.java index c719cbda6f0be..de858c8d2bdd8 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerSemaphoreTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ProducerSemaphoreTest.java @@ -18,6 +18,10 @@ */ package org.apache.pulsar.client.impl; +import static org.mockito.ArgumentMatchers.any; +import java.lang.reflect.Field; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.TimeUnit; import lombok.Cleanup; import org.apache.pulsar.client.api.MessageId; import 
org.apache.pulsar.client.api.ProducerConsumerBase; @@ -25,6 +29,7 @@ import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.util.FutureUtil; +import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -36,8 +41,6 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @Test(groups = "broker-impl") public class ProducerSemaphoreTest extends ProducerConsumerBase { @@ -55,6 +58,39 @@ public void cleanup() throws Exception { super.internalCleanup(); } + @Test(timeOut = 10_000) + public void testProducerSemaphoreInvalidMessage() throws Exception { + final int pendingQueueSize = 100; + + @Cleanup + ProducerImpl producer = (ProducerImpl) pulsarClient.newProducer() + .topic("testProducerSemaphoreAcquire") + .maxPendingMessages(pendingQueueSize) + .enableBatching(true) + .create(); + + this.stopBroker(); + + Field maxMessageSizeFiled = ClientCnx.class.getDeclaredField("maxMessageSize"); + maxMessageSizeFiled.setAccessible(true); + maxMessageSizeFiled.set(null, 2); + + try { + producer.send("semaphore-test".getBytes(StandardCharsets.UTF_8)); + Assert.fail("can not reach here"); + } catch (PulsarClientException.InvalidMessageException ex) { + Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize); + } + + producer.conf.setBatchingEnabled(false); + try { + producer.send("semaphore-test".getBytes(StandardCharsets.UTF_8)); + Assert.fail("can not reach here"); + } catch (PulsarClientException.InvalidMessageException ex) { + Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize); + } + } + @Test(timeOut = 30000) public void testProducerSemaphoreAcquireAndRelease() throws PulsarClientException, ExecutionException, InterruptedException { @@ -75,11 +111,13 
@@ public void testProducerSemaphoreAcquireAndRelease() throws PulsarClientExceptio futures.add(producer.newMessage().value(("Semaphore-test-" + i).getBytes()).sendAsync()); } Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize - messages); + Assert.assertFalse(producer.isErrorStat()); } finally { producer.getClientCnx().channel().config().setAutoRead(true); } FutureUtil.waitForAll(futures).get(); Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize); + Assert.assertFalse(producer.isErrorStat()); futures.clear(); // Simulate replicator, non batching message but `numMessagesInBatch` of message metadata > 1 @@ -92,15 +130,18 @@ public void testProducerSemaphoreAcquireAndRelease() throws PulsarClientExceptio futures.add(producer.sendAsync(msg)); } Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize - messages/2); + Assert.assertFalse(producer.isErrorStat()); } finally { producer.getClientCnx().channel().config().setAutoRead(true); } FutureUtil.waitForAll(futures).get(); Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize); + Assert.assertFalse(producer.isErrorStat()); futures.clear(); // Here must ensure that the semaphore available permits is 0 Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize); + Assert.assertFalse(producer.isErrorStat()); // Acquire 5 and not wait the send ack call back producer.getClientCnx().channel().config().setAutoRead(false); @@ -111,12 +152,14 @@ public void testProducerSemaphoreAcquireAndRelease() throws PulsarClientExceptio // Here must ensure that the Semaphore a acquired 5 Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize - messages / 2); + Assert.assertFalse(producer.isErrorStat()); } finally { producer.getClientCnx().channel().config().setAutoRead(true); } FutureUtil.waitForAll(futures).get(); 
Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize); + Assert.assertFalse(producer.isErrorStat()); } /** @@ -141,6 +184,7 @@ public void testEnsureNotBlockOnThePendingQueue() throws Exception { // Test that when we fill the queue with "replicator" messages, we are notified // (replicator itself would block) Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize); + Assert.assertFalse(producer.isErrorStat()); producer.getClientCnx().channel().config().setAutoRead(false); try { for (int i = 0; i < pendingQueueSize; i++) { @@ -151,6 +195,7 @@ public void testEnsureNotBlockOnThePendingQueue() throws Exception { futures.add(producer.sendAsync(msg)); } Assert.assertEquals(producer.getSemaphore().get().availablePermits(), 0); + Assert.assertFalse(producer.isErrorStat()); try { MessageMetadata metadata = new MessageMetadata() .setNumMessagesInBatch(10); @@ -162,6 +207,7 @@ public void testEnsureNotBlockOnThePendingQueue() throws Exception { Assert.assertEquals(ee.getCause().getClass(), PulsarClientException.ProducerQueueIsFullError.class); Assert.assertEquals(producer.getSemaphore().get().availablePermits(), 0); + Assert.assertFalse(producer.isErrorStat()); } } finally { producer.getClientCnx().channel().config().setAutoRead(true); @@ -171,12 +217,14 @@ public void testEnsureNotBlockOnThePendingQueue() throws Exception { // Test that when we fill the queue with normal messages, we get an error Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize); + Assert.assertFalse(producer.isErrorStat()); producer.getClientCnx().channel().config().setAutoRead(false); try { for (int i = 0; i < pendingQueueSize; i++) { futures.add(producer.newMessage().value(("Semaphore-test-" + i).getBytes()).sendAsync()); } Assert.assertEquals(producer.getSemaphore().get().availablePermits(), 0); + Assert.assertFalse(producer.isErrorStat()); try { 
producer.newMessage().value(("Semaphore-test-Q-full").getBytes()).sendAsync().get(); @@ -184,6 +232,7 @@ public void testEnsureNotBlockOnThePendingQueue() throws Exception { Assert.assertEquals(ee.getCause().getClass(), PulsarClientException.ProducerQueueIsFullError.class); Assert.assertEquals(producer.getSemaphore().get().availablePermits(), 0); + Assert.assertFalse(producer.isErrorStat()); } } finally { @@ -191,5 +240,46 @@ public void testEnsureNotBlockOnThePendingQueue() throws Exception { } FutureUtil.waitForAll(futures).get(); Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize); + Assert.assertFalse(producer.isErrorStat()); + } + + @Test(timeOut = 10_000) + public void testBatchMessageSendTimeoutProducerSemaphoreRelease() throws Exception { + final int pendingQueueSize = 10; + @Cleanup + ProducerImpl producer = + (ProducerImpl) pulsarClient.newProducer() + .topic("testProducerSemaphoreRelease") + .sendTimeout(2, TimeUnit.SECONDS) + .maxPendingMessages(pendingQueueSize) + .enableBatching(true) + .batchingMaxPublishDelay(100, TimeUnit.MILLISECONDS) + .batchingMaxBytes(15) + .create(); + this.stopBroker(); + try { + ProducerImpl spyProducer = Mockito.spy(producer); + // Make the pendingMessages not empty + spyProducer.newMessage().value("semaphore-test".getBytes(StandardCharsets.UTF_8)).sendAsync(); + spyProducer.newMessage().value("semaphore-test".getBytes(StandardCharsets.UTF_8)).sendAsync(); + + Field batchMessageContainerField = ProducerImpl.class.getDeclaredField("batchMessageContainer"); + batchMessageContainerField.setAccessible(true); + BatchMessageContainerImpl batchMessageContainer = + (BatchMessageContainerImpl) batchMessageContainerField.get(spyProducer); + batchMessageContainer.setProducer(spyProducer); + Mockito.doThrow(new PulsarClientException.CryptoException("crypto error")).when(spyProducer) + .encryptMessage(any(), any()); + + try { + 
spyProducer.newMessage().value("memory-test".getBytes(StandardCharsets.UTF_8)).sendAsync().get(); + } catch (Exception e) { + throw PulsarClientException.unwrap(e); + } + + throw new IllegalStateException("can not reach here"); + } catch (PulsarClientException.TimeoutException ex) { + Assert.assertEquals(producer.getSemaphore().get().availablePermits(), pendingQueueSize); + } } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarClientConfigurationOverrideTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarClientConfigurationOverrideTest.java new file mode 100644 index 0000000000000..4f885ecc46b07 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarClientConfigurationOverrideTest.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.impl; + +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.client.api.ClientBuilder; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.internal.PropertiesUtils; +import org.testng.Assert; +import org.testng.annotations.Test; + +import java.util.Map; + +public class PulsarClientConfigurationOverrideTest { + @Test + public void testFilterAndMapProperties() { + // Create a default config + ServiceConfiguration conf = new ServiceConfiguration(); + conf.getProperties().setProperty("keepAliveIntervalSeconds", "15"); + conf.getProperties().setProperty("brokerClient_keepAliveIntervalSeconds", "25"); + + // Apply the filtering and mapping logic + Map result = PropertiesUtils.filterAndMapProperties(conf.getProperties(), "brokerClient_"); + + // Ensure the results match expectations + Assert.assertEquals(result.size(), 1, "The filtered map should have one entry."); + Assert.assertNull(result.get("brokerClient_keepAliveIntervalSeconds"), + "The mapped prop should not be in the result."); + Assert.assertEquals(result.get("keepAliveIntervalSeconds"), "25", "The original value is overridden."); + + // Create sample ClientBuilder + ClientBuilder builder = PulsarClient.builder(); + Assert.assertEquals( + ((ClientBuilderImpl) builder).getClientConfigurationData().getKeepAliveIntervalSeconds(), 30); + // Note: this test would fail if any @Secret fields were set before the loadConf and the accessed afterwards. 
+ builder.loadConf(result); + Assert.assertEquals( + ((ClientBuilderImpl) builder).getClientConfigurationData().getKeepAliveIntervalSeconds(), 25); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarTestClient.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarTestClient.java index eebcf5b68c9d4..8136cf07c345a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarTestClient.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/PulsarTestClient.java @@ -151,11 +151,11 @@ public boolean add(OpSendMsg opSendMsg) { } @Override - protected boolean shouldWriteOpSendMsg() { + protected ClientCnx getCnxIfReady() { if (dropOpSendMessages) { - return false; + return null; } else { - return super.shouldWriteOpSendMsg(); + return super.getCnxIfReady(); } } }; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawReaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawReaderTest.java index de9eb80a26c14..75aa3ee594131 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawReaderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RawReaderTest.java @@ -107,6 +107,58 @@ public static String extractKey(RawMessage m) { return msgMetadata.getPartitionKey(); } + @Test + public void testHasMessageAvailableWithoutBatch() throws Exception { + int numKeys = 10; + String topic = "persistent://my-property/my-ns/my-raw-topic"; + Set keys = publishMessages(topic, numKeys); + RawReader reader = RawReader.create(pulsarClient, topic, subscription).get(); + while (true) { + boolean hasMsg = reader.hasMessageAvailableAsync().get(); + if (hasMsg && keys.isEmpty()) { + Assert.fail("HasMessageAvailable shows still has message when there is no message"); + } + if (hasMsg) { + try (RawMessage m = reader.readNextAsync().get()) { + Assert.assertTrue(keys.remove(extractKey(m))); + } + } else { + break; + } + } + 
Assert.assertTrue(keys.isEmpty()); + } + + @Test + public void testHasMessageAvailableWithBatch() throws Exception { + int numKeys = 20; + String topic = "persistent://my-property/my-ns/my-raw-topic"; + Set keys = publishMessages(topic, numKeys, true); + RawReader reader = RawReader.create(pulsarClient, topic, subscription).get(); + int messageCount = 0; + while (true) { + boolean hasMsg = reader.hasMessageAvailableAsync().get(); + if (hasMsg && (messageCount == numKeys)) { + Assert.fail("HasMessageAvailable shows still has message when there is no message"); + } + if (hasMsg) { + try (RawMessage m = reader.readNextAsync().get()) { + MessageMetadata meta = Commands.parseMessageMetadata(m.getHeadersAndPayload()); + messageCount += meta.getNumMessagesInBatch(); + RawBatchConverter.extractIdsAndKeysAndSize(m).forEach(batchInfo -> { + String key = batchInfo.getMiddle(); + Assert.assertTrue(keys.remove(key)); + }); + + } + } else { + break; + } + } + Assert.assertEquals(messageCount, numKeys); + Assert.assertTrue(keys.isEmpty()); + } + @Test public void testRawReader() throws Exception { int numKeys = 10; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ReaderTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ReaderTest.java index 205264699eb90..53ae8f970085d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ReaderTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/ReaderTest.java @@ -36,7 +36,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; - import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; @@ -55,6 +54,8 @@ import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.ManagedLedgerInternalStats; +import 
org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; import org.apache.pulsar.common.policies.data.RetentionPolicies; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicStats; @@ -602,4 +603,22 @@ public void removeNonPersistentTopicReaderTest() throws Exception { }); } + @Test + public void testReaderCursorStatsCorrect() throws Exception { + final String readerNotAckTopic = "persistent://my-property/my-ns/testReaderCursorStatsCorrect"; + @Cleanup + Reader reader = pulsarClient.newReader() + .topic(readerNotAckTopic) + .startMessageId(MessageId.earliest) + .create(); + PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(readerNotAckTopic); + Assert.assertEquals(internalStats.cursors.size(), 1); + String key = new ArrayList<>(internalStats.cursors.keySet()).get(0); + ManagedLedgerInternalStats.CursorStats cursor = internalStats.cursors.get(key); + Assert.assertEquals(cursor.state, "Open"); + reader.close(); + internalStats = admin.topics().getInternalStats(readerNotAckTopic); + Assert.assertEquals(internalStats.cursors.size(), 0); + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RetryUtilTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RetryUtilTest.java index e17f376fcafb8..f5544307c3711 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RetryUtilTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/RetryUtilTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.client.impl; import org.apache.pulsar.client.util.RetryUtil; +import org.apache.pulsar.common.util.FutureUtil; import org.testng.annotations.Test; import java.util.concurrent.CompletableFuture; @@ -45,11 +46,14 @@ public void testFailAndRetry() throws Exception { .setMandatoryStop(5000, TimeUnit.MILLISECONDS) .create(); RetryUtil.retryAsynchronously(() -> { + CompletableFuture future = new CompletableFuture<>(); 
atomicInteger.incrementAndGet(); if (atomicInteger.get() < 5) { - throw new RuntimeException("fail"); + future.completeExceptionally(new RuntimeException("fail")); + } else { + future.complete(true); } - return true; + return future; }, backoff, executor, callback); assertTrue(callback.get()); assertEquals(atomicInteger.get(), 5); @@ -66,9 +70,8 @@ public void testFail() throws Exception { .setMandatoryStop(5000, TimeUnit.MILLISECONDS) .create(); long start = System.currentTimeMillis(); - RetryUtil.retryAsynchronously(() -> { - throw new RuntimeException("fail"); - }, backoff, executor, callback); + RetryUtil.retryAsynchronously(() -> + FutureUtil.failedFuture(new RuntimeException("fail")), backoff, executor, callback); try { callback.get(); } catch (Exception e) { diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionClientConnectTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionClientConnectTest.java new file mode 100644 index 0000000000000..7fb924f49b885 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionClientConnectTest.java @@ -0,0 +1,220 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.impl; + +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeoutException; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; +import org.apache.pulsar.broker.TransactionMetadataStoreService; +import org.apache.pulsar.broker.transaction.TransactionTestBase; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClientException; +import org.apache.pulsar.client.api.transaction.TxnID; +import org.apache.pulsar.client.impl.transaction.TransactionCoordinatorClientImpl; +import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; +import org.apache.pulsar.transaction.coordinator.TransactionMetadataStore; +import org.apache.pulsar.transaction.coordinator.TransactionMetadataStoreState; +import org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStore; +import org.awaitility.Awaitility; +import org.testng.Assert; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.Collections; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertFalse; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + +@Slf4j +public class TransactionClientConnectTest extends TransactionTestBase { + + private static final String RECONNECT_TOPIC = NAMESPACE1 + "/txn-client-reconnect-test"; + private static final int NUM_PARTITIONS = 1; + @BeforeMethod(alwaysRun = true) + public void setup() throws Exception { + setUpBase(1, NUM_PARTITIONS, RECONNECT_TOPIC, 0); + 
admin.topics().createSubscription(RECONNECT_TOPIC, "test", MessageId.latest); + } + + @AfterMethod(alwaysRun = true) + protected void cleanup() { + super.internalCleanup(); + } + + @Test + public void testTransactionNewReconnect() throws Exception { + Callable> callable = () -> pulsarClient.newTransaction() + .withTransactionTimeout(200, TimeUnit.MILLISECONDS).build(); + tryCommandReconnect(callable, callable); + } + + @Test + public void testTransactionAddSubscriptionToTxnAsyncReconnect() throws Exception { + TransactionCoordinatorClientImpl transactionCoordinatorClient = ((PulsarClientImpl) pulsarClient).getTcClient(); + Callable> callable = () -> transactionCoordinatorClient + .addSubscriptionToTxnAsync(new TxnID(0, 0), "test", "test"); + tryCommandReconnect(callable, callable); + } + + public void tryCommandReconnect(Callable> callable1, Callable> callable2) + throws Exception { + start(); + try { + callable1.call().get(); + } catch (ExecutionException e) { + assertFalse(e.getCause() instanceof TransactionCoordinatorClientException.CoordinatorNotFoundException); + waitToReady(); + callable1.call().get(); + } + fence(getPulsarServiceList().get(0).getTransactionMetadataStoreService()); + CompletableFuture completableFuture = callable2.call(); + try { + completableFuture.get(3, TimeUnit.SECONDS); + } catch (TimeoutException ignore) { + } catch (ExecutionException e) { + Assert.assertFalse(e.getCause() + instanceof TransactionCoordinatorClientException.CoordinatorNotFoundException); + } + + unFence(getPulsarServiceList().get(0).getTransactionMetadataStoreService()); + completableFuture.get(); + } + + @Test + public void testTransactionAbortToTxnAsyncReconnect() throws Exception { + TransactionCoordinatorClientImpl transactionCoordinatorClient = ((PulsarClientImpl) pulsarClient).getTcClient(); + Callable> callable1 = () -> transactionCoordinatorClient.abortAsync(new TxnID(0, + 0)); + Callable> callable2 = () -> transactionCoordinatorClient.abortAsync(new TxnID(0, + 
1)); + tryCommandReconnect(callable1, callable2); + } + + @Test + public void testTransactionCommitToTxnAsyncReconnect() throws Exception { + TransactionCoordinatorClientImpl transactionCoordinatorClient = ((PulsarClientImpl) pulsarClient).getTcClient(); + Callable> callable1 = () -> transactionCoordinatorClient.commitAsync(new TxnID(0, + 0)); + Callable> callable2 = () -> transactionCoordinatorClient.commitAsync(new TxnID(0, + 1)); + tryCommandReconnect(callable1, callable2); + } + + @Test + public void testTransactionAddPublishPartitionToTxnReconnect() throws Exception { + TransactionCoordinatorClientImpl transactionCoordinatorClient = ((PulsarClientImpl) pulsarClient).getTcClient(); + Callable> callable = () -> transactionCoordinatorClient.addPublishPartitionToTxnAsync(new TxnID(0, 0), + Collections.singletonList("test")); + tryCommandReconnect(callable, callable); + } + + @Test + public void testPulsarClientCloseThenCloseTcClient() throws Exception { + TransactionCoordinatorClientImpl transactionCoordinatorClient = ((PulsarClientImpl) pulsarClient).getTcClient(); + Field field = TransactionCoordinatorClientImpl.class.getDeclaredField("handlers"); + field.setAccessible(true); + TransactionMetaStoreHandler[] handlers = + (TransactionMetaStoreHandler[]) field.get(transactionCoordinatorClient); + + for (TransactionMetaStoreHandler handler : handlers) { + handler.newTransactionAsync(10, TimeUnit.SECONDS).get(); + } + for (TransactionMetaStoreHandler handler : handlers) { + Field stateField = HandlerState.class.getDeclaredField("state"); + stateField.setAccessible(true); + stateField.set(handler, HandlerState.State.Closed); + } + for (TransactionMetaStoreHandler handler : handlers) { + Method method = TransactionMetaStoreHandler.class.getMethod("getConnectHandleState"); + method.setAccessible(true); + assertEquals(method.invoke(handler).toString(), "Closed"); + try { + handler.newTransactionAsync(10, TimeUnit.SECONDS).get(); + } catch (ExecutionException | 
InterruptedException e) { + assertTrue(e.getCause() + instanceof TransactionCoordinatorClientException.MetaStoreHandlerNotReadyException); + } + } + } + + public void start() throws Exception { + // wait transaction coordinator init success + pulsarClient.newTransaction() + .withTransactionTimeout(30, TimeUnit.SECONDS).build().get(); + pulsarClient.newTransaction() + .withTransactionTimeout(30, TimeUnit.SECONDS).build().get(); + + TransactionMetadataStoreService transactionMetadataStoreService = + getPulsarServiceList().get(0).getTransactionMetadataStoreService(); + // remove transaction metadap0-ta store + transactionMetadataStoreService.removeTransactionMetadataStore(TransactionCoordinatorID.get(0)).get(); + + } + + public void fence(TransactionMetadataStoreService transactionMetadataStoreService) throws Exception { + Field field = ManagedLedgerImpl.class.getDeclaredField("state"); + field.setAccessible(true); + field.set(((MLTransactionMetadataStore) transactionMetadataStoreService.getStores() + .get(TransactionCoordinatorID.get(0))).getManagedLedger(), ManagedLedgerImpl.State.Fenced); + } + public void unFence(TransactionMetadataStoreService transactionMetadataStoreService) throws Exception { + Field field = ManagedLedgerImpl.class.getDeclaredField("state"); + field.setAccessible(true); + field.set(((MLTransactionMetadataStore) transactionMetadataStoreService.getStores() + .get(TransactionCoordinatorID.get(0))).getManagedLedger(), ManagedLedgerImpl.State.LedgerOpened); + } + + public void waitToReady() throws Exception{ + TransactionMetadataStoreService transactionMetadataStoreService = + getPulsarServiceList().get(0).getTransactionMetadataStoreService(); + Class transactionMetadataStoreServiceClass = + TransactionMetadataStoreService.class; + Field field1 = + transactionMetadataStoreServiceClass.getDeclaredField("stores"); + field1.setAccessible(true); + Map stores = + (Map) field1 + .get(transactionMetadataStoreService); + Awaitility.await().until(() -> { + 
for (TransactionMetadataStore transactionMetadataStore : stores.values()) { + Class transactionMetadataStoreStateClass = + TransactionMetadataStoreState.class; + Field field = transactionMetadataStoreStateClass.getDeclaredField("state"); + field.setAccessible(true); + TransactionMetadataStoreState.State state = + (TransactionMetadataStoreState.State) field.get(transactionMetadataStore); + if (!state.equals(TransactionMetadataStoreState.State.Ready)) { + return false; + } + } + return true; + }); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java index d7cb6c9cc3220..2d174b9b1fb56 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndTest.java @@ -19,24 +19,33 @@ package org.apache.pulsar.client.impl; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyObject; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; - -import com.google.common.collect.Sets; - +import java.lang.reflect.Constructor; import java.lang.reflect.Field; +import java.util.Collection; import java.util.Optional; +import java.util.Set; +import java.util.TreeSet; import java.util.concurrent.CompletableFuture; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; - +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import 
io.netty.channel.DefaultEventLoop; +import io.netty.util.concurrent.EventExecutor; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; - import org.apache.bookkeeper.mledger.Position; import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; import org.apache.bookkeeper.mledger.impl.PositionImpl; @@ -46,6 +55,7 @@ import org.apache.pulsar.broker.service.persistent.PersistentSubscription; import org.apache.pulsar.broker.transaction.TransactionTestBase; import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.DeadLetterPolicy; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Producer; @@ -62,12 +72,10 @@ import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.client.impl.transaction.TransactionImpl; import org.apache.pulsar.client.internal.DefaultImplementation; +import org.apache.pulsar.client.util.RetryMessageUtil; import org.apache.pulsar.common.api.proto.CommandAck; -import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.transaction.coordinator.TransactionCoordinatorID; import org.apache.pulsar.transaction.coordinator.TransactionMetadataStore; @@ -86,59 +94,28 @@ @Test(groups = "flaky") public class TransactionEndToEndTest extends TransactionTestBase { - private static final int TOPIC_PARTITION = 3; - - private static final String TENANT = "tnx"; - private static final String NAMESPACE1 = TENANT + "/ns1"; - private static final String TOPIC_OUTPUT = NAMESPACE1 + "/output"; - private static final String TOPIC_MESSAGE_ACK_TEST = NAMESPACE1 + "/message-ack-test"; - private static final int 
NUM_PARTITIONS = 16; + protected static final int TOPIC_PARTITION = 3; + protected static final String TOPIC_OUTPUT = NAMESPACE1 + "/output"; + protected static final String TOPIC_MESSAGE_ACK_TEST = NAMESPACE1 + "/message-ack-test"; + protected static final int NUM_PARTITIONS = 16; @BeforeMethod protected void setup() throws Exception { - setBrokerCount(1); - internalSetup(); - - String[] brokerServiceUrlArr = getPulsarServiceList().get(0).getBrokerServiceUrl().split(":"); - String webServicePort = brokerServiceUrlArr[brokerServiceUrlArr.length -1]; - admin.clusters().createCluster(CLUSTER_NAME, ClusterData.builder().serviceUrl("http://localhost:" + webServicePort).build()); - admin.tenants().createTenant(TENANT, - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NAMESPACE1); - admin.topics().createPartitionedTopic(TOPIC_OUTPUT, TOPIC_PARTITION); + conf.setAcknowledgmentAtBatchIndexLevelEnabled(true); + setUpBase(1, NUM_PARTITIONS, TOPIC_OUTPUT, TOPIC_PARTITION); admin.topics().createPartitionedTopic(TOPIC_MESSAGE_ACK_TEST, 1); - - admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), - new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet(CLUSTER_NAME))); - admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); - admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), NUM_PARTITIONS); - - if (pulsarClient != null) { - pulsarClient.close(); - } - pulsarClient = PulsarClient.builder() - .serviceUrl(getPulsarServiceList().get(0).getBrokerServiceUrl()) - .statsInterval(0, TimeUnit.SECONDS) - .enableTransaction(true) - .build(); - - // wait tc init success to ready state - waitForCoordinatorToBeAvailable(NUM_PARTITIONS); } + } @AfterMethod(alwaysRun = true) protected void cleanup() { super.internalCleanup(); } - @Test - public void noBatchProduceCommitTest() throws Exception { - produceCommitTest(false); - } - - @Test 
- public void batchProduceCommitTest() throws Exception { - produceCommitTest(true); + @DataProvider(name = "enableBatch") + public Object[][] enableBatch() { + return new Object[][] { { Boolean.TRUE }, { Boolean.FALSE } }; } + @Test(dataProvider="enableBatch") private void produceCommitTest(boolean enableBatch) throws Exception { @Cleanup Consumer consumer = pulsarClient @@ -281,6 +258,63 @@ public void produceAbortTest() throws Exception { log.info("finished test partitionAbortTest"); } + @Test(dataProvider="enableBatch") + private void testAckWithTransactionReduceUnAckMessageCount(boolean enableBatch) throws Exception { + + final int messageCount = 50; + final String subName = "testAckWithTransactionReduceUnAckMessageCount"; + final String topicName = NAMESPACE1 + "/testAckWithTransactionReduceUnAckMessageCount-" + enableBatch; + @Cleanup + Consumer consumer = pulsarClient + .newConsumer() + .topic(topicName) + .subscriptionName(subName) + .subscriptionType(SubscriptionType.Shared) + .isAckReceiptEnabled(true) + .subscribe(); + Awaitility.await().until(consumer::isConnected); + + Producer producer = pulsarClient + .newProducer() + .topic(topicName) + .enableBatching(enableBatch) + .batchingMaxMessages(10) + .create(); + + CountDownLatch countDownLatch = new CountDownLatch(messageCount); + for (int i = 0; i < messageCount; i++) { + producer.sendAsync((i + "").getBytes()).thenRun(countDownLatch::countDown); + } + + countDownLatch.await(); + + Transaction txn = getTxn(); + + for (int i = 0; i < messageCount / 2; i++) { + Message message = consumer.receive(); + consumer.acknowledgeAsync(message.getMessageId(), txn).get(); + } + + txn.commit().get(); + boolean flag = false; + String topic = TopicName.get(topicName).toString(); + for (int i = 0; i < getPulsarServiceList().size(); i++) { + CompletableFuture> topicFuture = getPulsarServiceList().get(i) + .getBrokerService().getTopic(topic, false); + + if (topicFuture != null) { + Optional topicOptional = 
topicFuture.get(); + if (topicOptional.isPresent()) { + PersistentSubscription persistentSubscription = + (PersistentSubscription) topicOptional.get().getSubscription(subName); + assertEquals(persistentSubscription.getConsumers().get(0).getUnackedMessages(), messageCount / 2); + flag = true; + } + } + } + assertTrue(flag); + } + @Test public void txnIndividualAckTestNoBatchAndSharedSub() throws Exception { txnAckTest(false, 1, SubscriptionType.Shared); @@ -301,7 +335,7 @@ public void txnIndividualAckTestBatchAndFailoverSub() throws Exception { txnAckTest(true, 200, SubscriptionType.Failover); } - private void txnAckTest(boolean batchEnable, int maxBatchSize, + protected void txnAckTest(boolean batchEnable, int maxBatchSize, SubscriptionType subscriptionType) throws Exception { String normalTopic = NAMESPACE1 + "/normal-topic"; @@ -376,6 +410,23 @@ private void txnAckTest(boolean batchEnable, int maxBatchSize, } } + @Test + public void testAfterDeleteTopicOtherTopicCanRecover() throws Exception { + String topicOne = "persistent://" + NAMESPACE1 + "/topic-one"; + String topicTwo = "persistent://" + NAMESPACE1 + "/topic-two"; + String sub = "test"; + admin.topics().createNonPartitionedTopic(topicOne); + admin.topics().createSubscription(topicOne, "test", MessageId.earliest); + admin.topics().delete(topicOne); + + Producer producer = pulsarClient.newProducer(Schema.STRING).topic(topicTwo).create(); + Consumer consumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topicTwo).subscriptionName(sub).subscribe(); + String content = "test"; + producer.send(content); + assertEquals(consumer.receive().getValue(), content); + } + @Test public void txnMessageAckTest() throws Exception { String topic = TOPIC_MESSAGE_ACK_TEST; @@ -787,18 +838,45 @@ public void produceAndConsumeCloseStateTxnTest() throws Exception { } }); + Class transactionClass = TransactionImpl.class; + Constructor constructor = transactionClass + .getDeclaredConstructor(PulsarClientImpl.class, long.class, 
long.class, long.class); + constructor.setAccessible(true); + + TransactionImpl timeoutTxnSkipClientTimeout = constructor.newInstance(pulsarClient, 5, + timeoutTxn.getTxnID().getLeastSigBits(), timeoutTxn.getTxnID().getMostSigBits()); + try { - timeoutTxn.commit().get(); + timeoutTxnSkipClientTimeout.commit().get(); fail(); } catch (Exception e) { assertTrue(e.getCause() instanceof TransactionNotFoundException); } Field field = TransactionImpl.class.getDeclaredField("state"); field.setAccessible(true); - TransactionImpl.State state = (TransactionImpl.State) field.get(timeoutTxn); + TransactionImpl.State state = (TransactionImpl.State) field.get(timeoutTxnSkipClientTimeout); assertEquals(state, TransactionImpl.State.ERROR); } + @Test + public void testTxnTimeoutAtTransactionMetadataStore() throws Exception{ + TxnID txnID = pulsarServiceList.get(0).getTransactionMetadataStoreService() + .newTransaction(new TransactionCoordinatorID(0), 1).get(); + Awaitility.await().until(() -> { + try { + getPulsarServiceList().get(0).getTransactionMetadataStoreService().getTxnMeta(txnID).get(); + return false; + } catch (Exception e) { + return true; + } + }); + Collection transactionMetadataStores = + getPulsarServiceList().get(0).getTransactionMetadataStoreService().getStores().values(); + long timeoutCount = transactionMetadataStores.stream() + .mapToLong(store -> store.getMetadataStoreStats().timeoutCount).sum(); + Assert.assertEquals(timeoutCount, 1); + } + @Test public void transactionTimeoutTest() throws Exception { String topic = NAMESPACE1 + "/txn-timeout"; @@ -960,4 +1038,257 @@ public void oneTransactionOneTopicWithMultiSubTest() throws Exception { } assertTrue(flag); } + + @Test + public void testTxnTimeOutInClient() throws Exception{ + String topic = NAMESPACE1 + "/testTxnTimeOutInClient"; + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING).producerName("testTxnTimeOut_producer") + .topic(topic).sendTimeout(0, 
TimeUnit.SECONDS).enableBatching(false).create(); + @Cleanup + Consumer consumer = pulsarClient.newConsumer(Schema.STRING).consumerName("testTxnTimeOut_consumer") + .topic(topic).subscriptionName("testTxnTimeOut_sub").subscribe(); + + Transaction transaction = pulsarClient.newTransaction().withTransactionTimeout(1, TimeUnit.SECONDS) + .build().get(); + producer.newMessage().send(); + Awaitility.await().untilAsserted(() -> { + Assert.assertEquals(((TransactionImpl)transaction).getState(), TransactionImpl.State.TIMEOUT); + }); + + try { + producer.newMessage(transaction).send(); + Assert.fail(); + } catch (Exception e) { + Assert.assertTrue(e.getCause().getCause() instanceof TransactionCoordinatorClientException + .InvalidTxnStatusException); + } + try { + Message message = consumer.receive(); + consumer.acknowledgeAsync(message.getMessageId(), transaction).get(); + Assert.fail(); + } catch (Exception e) { + Assert.assertTrue(e.getCause() instanceof TransactionCoordinatorClientException + .InvalidTxnStatusException); + } + } + + @Test + public void testSendTxnMessageTimeout() throws Exception { + String topic = NAMESPACE1 + "/testSendTxnMessageTimeout"; + @Cleanup + ProducerImpl producer = (ProducerImpl) pulsarClient.newProducer() + .topic(topic) + .sendTimeout(1, TimeUnit.SECONDS) + .create(); + + Transaction transaction = pulsarClient.newTransaction().withTransactionTimeout(5, TimeUnit.SECONDS) + .build().get(); + + // mock cnx, send message can't receive response + ClientCnx cnx = mock(ClientCnx.class); + Channel channel = mock(Channel.class); + doReturn(spy(DefaultEventLoop.class)).when(channel).eventLoop(); + ChannelHandlerContext channelHandlerContext = mock(ChannelHandlerContext.class); + doReturn(channel).when(channelHandlerContext).channel(); + doReturn(channelHandlerContext).when(cnx).ctx(); + EventExecutor eventExecutor = mock(EventExecutor.class); + doReturn(eventExecutor).when(channelHandlerContext).executor(); + CompletableFuture completableFuture = new 
CompletableFuture<>(); + completableFuture.complete(new ProducerResponse("test", 1, + "1".getBytes(), Optional.of(30L))); + doReturn(completableFuture).when(cnx).sendRequestWithId(anyObject(), anyLong()); + producer.getConnectionHandler().setClientCnx(cnx); + + + try { + // send message with txn use mock cnx, will not receive send response + producer.newMessage(transaction).value("Hello Pulsar!".getBytes()).send(); + fail(); + } catch (PulsarClientException ex) { + assertTrue(ex instanceof PulsarClientException.TimeoutException); + } + } + + @Test + public void testSendTxnAckMessageToDLQ() throws Exception { + String topic = NAMESPACE1 + "/testSendTxnAckMessageToDLQ"; + String subName = "test"; + String value = "test"; + @Cleanup + ProducerImpl producer = (ProducerImpl) pulsarClient.newProducer() + .topic(topic) + .enableBatching(false) + .sendTimeout(1, TimeUnit.SECONDS) + .create(); + + @Cleanup + Consumer consumer = pulsarClient.newConsumer() + .topic(topic) + .subscriptionType(SubscriptionType.Shared) + // consumer can't receive the same message three times + .deadLetterPolicy(DeadLetterPolicy.builder().maxRedeliverCount(1).build()) + .subscriptionName(subName) + .subscribe(); + + @Cleanup + Consumer deadLetterConsumer = pulsarClient.newConsumer() + .topic(String.format("%s-%s" + RetryMessageUtil.DLQ_GROUP_TOPIC_SUFFIX, + topic, subName)) + .subscriptionType(SubscriptionType.Shared) + .deadLetterPolicy(DeadLetterPolicy.builder().maxRedeliverCount(1).build()) + .subscriptionName("test") + .subscribe(); + + producer.send(value.getBytes()); + Transaction transaction = pulsarClient.newTransaction().withTransactionTimeout(1, TimeUnit.MINUTES) + .build().get(); + + // consumer receive the message the first time, redeliverCount = 0 + consumer.acknowledgeAsync(consumer.receive().getMessageId(), transaction).get(); + + transaction.abort().get(); + + transaction = pulsarClient.newTransaction().withTransactionTimeout(5, TimeUnit.MINUTES) + .build().get(); + + // consumer 
receive the message the second time, redeliverCount = 1, also can be received + consumer.acknowledgeAsync(consumer.receive().getMessageId(), transaction).get(); + + transaction.abort().get(); + + // consumer receive the message the third time, redeliverCount = 2, + // the message will be sent to DLQ, can't receive + assertNull(consumer.receive(3, TimeUnit.SECONDS)); + + assertEquals(((ConsumerImpl) consumer).getAvailablePermits(), 3); + + assertEquals(value, new String(deadLetterConsumer.receive(3, TimeUnit.SECONDS).getValue())); + } + + @Test + public void testSendTxnAckBatchMessageToDLQ() throws Exception { + String topic = NAMESPACE1 + "/testSendTxnAckBatchMessageToDLQ"; + String subName = "test"; + String value1 = "test1"; + String value2 = "test2"; + @Cleanup + ProducerImpl producer = (ProducerImpl) pulsarClient.newProducer() + .topic(topic) + .sendTimeout(1, TimeUnit.SECONDS) + .create(); + + @Cleanup + Consumer consumer = pulsarClient.newConsumer() + .topic(topic) + .subscriptionType(SubscriptionType.Shared) + // consumer can't receive the same message three times + .deadLetterPolicy(DeadLetterPolicy.builder().maxRedeliverCount(1).build()) + .subscriptionName(subName) + .subscribe(); + + @Cleanup + Consumer deadLetterConsumer = pulsarClient.newConsumer() + .topic(String.format("%s-%s" + RetryMessageUtil.DLQ_GROUP_TOPIC_SUFFIX, + topic, subName)) + .subscriptionType(SubscriptionType.Shared) + .deadLetterPolicy(DeadLetterPolicy.builder().maxRedeliverCount(1).build()) + .subscriptionName("test") + .subscribe(); + + producer.sendAsync(value1.getBytes()); + producer.sendAsync(value2.getBytes()); + Transaction transaction = pulsarClient.newTransaction().withTransactionTimeout(1, TimeUnit.MINUTES) + .build().get(); + + Message message = consumer.receive(); + assertEquals(value1, new String(message.getValue())); + // consumer receive the batch message one the first time, redeliverCount = 0 + consumer.acknowledgeAsync(message.getMessageId(), transaction).get(); + + 
transaction.abort().get(); + + // consumer will receive the batch message two and then receive + // the message one and message two again, redeliverCount = 1 + for (int i = 0; i < 3; i ++) { + message = consumer.receive(); + } + + transaction = pulsarClient.newTransaction().withTransactionTimeout(5, TimeUnit.MINUTES) + .build().get(); + + assertEquals(value2, new String(message.getValue())); + // consumer receive the batch message two the second time, redeliverCount = 1, also can be received + consumer.acknowledgeAsync(message.getMessageId(), transaction).get(); + + transaction.abort().get(); + + // consumer receive the batch message the third time, redeliverCount = 2, + // the message will be sent to DLQ, can't receive + assertNull(consumer.receive(3, TimeUnit.SECONDS)); + + assertEquals(((ConsumerImpl) consumer).getAvailablePermits(), 6); + + assertEquals(value1, new String(deadLetterConsumer.receive(3, TimeUnit.SECONDS).getValue())); + assertEquals(value2, new String(deadLetterConsumer.receive(3, TimeUnit.SECONDS).getValue())); + } + + @Test + public void testDelayedTransactionMessages() throws Exception { + String topic = NAMESPACE1 + "/testDelayedTransactionMessages"; + + @Cleanup + Consumer failoverConsumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("failover-sub") + .subscriptionType(SubscriptionType.Failover) + .subscribe(); + + @Cleanup + Consumer sharedConsumer = pulsarClient.newConsumer(Schema.STRING) + .topic(topic) + .subscriptionName("shared-sub") + .subscriptionType(SubscriptionType.Shared) + .subscribe(); + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .enableBatching(false) + .create(); + + Transaction transaction = pulsarClient.newTransaction() + .withTransactionTimeout(10, TimeUnit.SECONDS).build().get(); + for (int i = 0; i < 10; i++) { + producer.newMessage(transaction) + .value("msg-" + i) + .deliverAfter(5, TimeUnit.SECONDS) + .sendAsync(); + } + + 
producer.flush(); + + transaction.commit().get(); + + // Failover consumer will receive the messages immediately while + // the shared consumer will get them after the delay + Message msg = sharedConsumer.receive(1, TimeUnit.SECONDS); + assertNull(msg); + + for (int i = 0; i < 10; i++) { + msg = failoverConsumer.receive(100, TimeUnit.MILLISECONDS); + assertEquals(msg.getValue(), "msg-" + i); + } + + Set receivedMsgs = new TreeSet<>(); + for (int i = 0; i < 10; i++) { + msg = sharedConsumer.receive(10, TimeUnit.SECONDS); + receivedMsgs.add(msg.getValue()); + } + + assertEquals(receivedMsgs.size(), 10); + for (int i = 0; i < 10; i++) { + assertTrue(receivedMsgs.contains("msg-" + i)); + } + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndWithoutBatchIndexAckTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndWithoutBatchIndexAckTest.java new file mode 100644 index 0000000000000..1ef3998c3467d --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/TransactionEndToEndWithoutBatchIndexAckTest.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.impl; + +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.client.api.SubscriptionType; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * End to end transaction test. + */ +@Slf4j +@Test(groups = "flaky") +public class TransactionEndToEndWithoutBatchIndexAckTest extends TransactionEndToEndTest { + + @BeforeMethod + protected void setup() throws Exception { + conf.setAcknowledgmentAtBatchIndexLevelEnabled(false); + setUpBase(1, NUM_PARTITIONS, TOPIC_OUTPUT, TOPIC_PARTITION); + admin.topics().createPartitionedTopic(TOPIC_MESSAGE_ACK_TEST, 1); + } + + // TODO need to fix which using transaction with individual ack for failover subscription + @Test + public void txnIndividualAckTestBatchAndFailoverSub() throws Exception { + conf.setAcknowledgmentAtBatchIndexLevelEnabled(true); + txnAckTest(true, 200, SubscriptionType.Failover); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/common/naming/ServiceConfigurationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/common/naming/ServiceConfigurationTest.java index 078ad61249f26..80baa7c2f8e24 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/common/naming/ServiceConfigurationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/common/naming/ServiceConfigurationTest.java @@ -33,6 +33,7 @@ import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; import org.apache.pulsar.common.policies.data.InactiveTopicDeleteMode; +import org.apache.pulsar.common.policies.data.OffloadPoliciesImpl; import org.testng.annotations.Test; @Test(groups = "broker-naming") @@ -61,6 +62,8 @@ public void testInit() throws Exception { assertEquals(config.getMaxMessagePublishBufferSizeInMB(), -1); assertEquals(config.getManagedLedgerDataReadPriority(), "bookkeeper-first"); assertEquals(config.getBacklogQuotaDefaultLimitGB(), 0.05); + OffloadPoliciesImpl 
offloadPolicies = OffloadPoliciesImpl.create(config.getProperties()); + assertEquals(offloadPolicies.getManagedLedgerOffloadedReadPriority().getValue(), "bookkeeper-first"); } @Test diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicTest.java index 3d410884e3042..4ae699b500f88 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactedTopicTest.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.compaction; +import static org.apache.pulsar.compaction.Compactor.COMPACTION_SUBSCRIPTION; import com.github.benmanes.caffeine.cache.AsyncLoadingCache; import com.google.common.collect.Sets; @@ -32,6 +33,7 @@ import java.util.Random; import java.util.UUID; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.IntStream; @@ -241,7 +243,20 @@ public void testCleanupOldCompactedTopicLedger() throws Exception { Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD).close(); // update the compacted topic ledger - compactedTopic.newCompactedLedger(new PositionImpl(1,2), newCompactedLedger.getId()).get(); + PositionImpl newHorizon = new PositionImpl(1,3); + compactedTopic.newCompactedLedger(newHorizon, newCompactedLedger.getId()).get(); + + // Make sure the old compacted ledger still exist after the new compacted ledger created. 
+ bk.openLedger(oldCompactedLedger.getId(), + Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE, + Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD).close(); + + Assert.assertTrue(compactedTopic.getCompactedTopicContext().isPresent()); + Assert.assertEquals(compactedTopic.getCompactedTopicContext().get().getLedger().getId(), + newCompactedLedger.getId()); + Assert.assertTrue(compactedTopic.getCompactionHorizon().isPresent()); + Assert.assertEquals(compactedTopic.getCompactionHorizon().get(), newHorizon); + compactedTopic.deleteCompactedLedger(oldCompactedLedger.getId()).join(); // old ledger should be deleted, new still there try { @@ -437,4 +452,391 @@ public void testLastMessageIdForCompactedLedger() throws Exception { reader.readNext(); Assert.assertFalse(reader.hasMessageAvailable()); } + + @Test + public void testDoNotLossTheLastCompactedLedgerData() throws Exception { + String topic = "persistent://my-property/use/my-ns/testDoNotLossTheLastCompactedLedgerData-" + + UUID.randomUUID(); + final int numMessages = 2000; + final int keys = 200; + final String msg = "Test"; + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .blockIfQueueFull(true) + .maxPendingMessages(numMessages) + .enableBatching(false) + .create(); + CompletableFuture lastMessage = null; + for (int i = 0; i < numMessages; ++i) { + lastMessage = producer.newMessage().key(i % keys + "").value(msg).sendAsync(); + } + producer.flush(); + lastMessage.join(); + admin.topics().triggerCompaction(topic); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topic); + Assert.assertNotEquals(stats.compactedLedger.ledgerId, -1); + Assert.assertEquals(stats.compactedLedger.entries, keys); + Assert.assertEquals(admin.topics().getStats(topic) + .getSubscriptions().get(COMPACTION_SUBSCRIPTION).getConsumers().size(), 0); + }); + admin.topics().unload(topic); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats 
= admin.topics().getInternalStats(topic); + Assert.assertEquals(stats.ledgers.size(), 1); + Assert.assertEquals(admin.topics().getStats(topic) + .getSubscriptions().get(COMPACTION_SUBSCRIPTION).getConsumers().size(), 0); + }); + admin.topics().unload(topic); + // Send one more key to and then to trigger the compaction + producer.newMessage().key(keys + "").value(msg).send(); + admin.topics().triggerCompaction(topic); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topic); + Assert.assertEquals(stats.compactedLedger.entries, keys + 1); + }); + + // Make sure the reader can get all data from the compacted ledger and original ledger. + Reader reader = pulsarClient.newReader(Schema.STRING) + .topic(topic) + .startMessageId(MessageId.earliest) + .readCompacted(true) + .create(); + int received = 0; + while (reader.hasMessageAvailable()) { + reader.readNext(); + received++; + } + Assert.assertEquals(received, keys + 1); + reader.close(); + producer.close(); + } + + @Test + public void testReadCompactedDataWhenLedgerRolloverKickIn() throws Exception { + String topic = "persistent://my-property/use/my-ns/testReadCompactedDataWhenLedgerRolloverKickIn-" + + UUID.randomUUID(); + final int numMessages = 2000; + final int keys = 200; + final String msg = "Test"; + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .blockIfQueueFull(true) + .maxPendingMessages(numMessages) + .enableBatching(false) + .create(); + CompletableFuture lastMessage = null; + for (int i = 0; i < numMessages; ++i) { + lastMessage = producer.newMessage().key(i % keys + "").value(msg).sendAsync(); + } + producer.flush(); + lastMessage.join(); + admin.topics().triggerCompaction(topic); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topic); + Assert.assertNotEquals(stats.compactedLedger.ledgerId, -1); + 
Assert.assertEquals(stats.compactedLedger.entries, keys); + Assert.assertEquals(admin.topics().getStats(topic) + .getSubscriptions().get(COMPACTION_SUBSCRIPTION).getConsumers().size(), 0); + }); + // Send more 200 keys + for (int i = 0; i < numMessages; ++i) { + lastMessage = producer.newMessage().key((i % keys + keys) + "").value(msg).sendAsync(); + } + producer.flush(); + lastMessage.join(); + + // Make sure we have more than 1 original ledgers + admin.topics().unload(topic); + Awaitility.await().untilAsserted(() -> { + Assert.assertEquals(admin.topics().getInternalStats(topic).ledgers.size(), 2); + }); + + // Start a new reader to reading messages + Reader reader = pulsarClient.newReader(Schema.STRING) + .topic(topic) + .startMessageId(MessageId.earliest) + .readCompacted(true) + .receiverQueueSize(10) + .create(); + + // Send more 200 keys + for (int i = 0; i < numMessages; ++i) { + lastMessage = producer.newMessage().key((i % keys + keys * 2) + "").value(msg).sendAsync(); + } + producer.flush(); + lastMessage.join(); + + admin.topics().triggerCompaction(topic); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topic); + Assert.assertNotEquals(stats.compactedLedger.ledgerId, -1); + Assert.assertEquals(stats.compactedLedger.entries, keys * 3); + Assert.assertEquals(admin.topics().getStats(topic) + .getSubscriptions().get(COMPACTION_SUBSCRIPTION).getConsumers().size(), 0); + }); + + // The reader should read all 600 keys + int received = 0; + while (reader.hasMessageAvailable()) { + reader.readNext(); + received++; + } + Assert.assertEquals(received, keys * 3); + reader.close(); + producer.close(); + } + + @Test(timeOut = 120000) + public void testCompactionWithTopicUnloading() throws Exception { + String topic = "persistent://my-property/use/my-ns/testCompactionWithTopicUnloading-" + + UUID.randomUUID(); + final int numMessages = 2000; + final int keys = 500; + final String msg = "Test"; + Producer 
producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .blockIfQueueFull(true) + .maxPendingMessages(numMessages) + .enableBatching(false) + .create(); + CompletableFuture lastMessage = null; + for (int i = 0; i < numMessages; ++i) { + lastMessage = producer.newMessage().key(i % keys + "").value(msg).sendAsync(); + } + producer.flush(); + lastMessage.join(); + admin.topics().triggerCompaction(topic); + Awaitility.await().pollInterval(5, TimeUnit.SECONDS).untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topic); + Assert.assertNotEquals(stats.compactedLedger.ledgerId, -1); + Assert.assertEquals(stats.compactedLedger.entries, keys); + Assert.assertEquals(admin.topics().getStats(topic) + .getSubscriptions().get(COMPACTION_SUBSCRIPTION).getConsumers().size(), 0); + }); + + admin.topics().unload(topic); + for (int i = 0; i < numMessages; ++i) { + lastMessage = producer.newMessage().key((i % keys + keys) + "").value(msg).sendAsync(); + } + producer.flush(); + lastMessage.join(); + admin.topics().triggerCompaction(topic); + Thread.sleep(100); + admin.topics().unload(topic); + admin.topics().triggerCompaction(topic); + Awaitility.await().pollInterval(3, TimeUnit.SECONDS).atMost(30, TimeUnit.SECONDS).untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topic); + Assert.assertNotEquals(stats.compactedLedger.ledgerId, -1); + Assert.assertEquals(stats.compactedLedger.entries, keys * 2); + Assert.assertEquals(admin.topics().getStats(topic) + .getSubscriptions().get(COMPACTION_SUBSCRIPTION).getConsumers().size(), 0); + }); + + // Start a new reader to reading messages + Reader reader = pulsarClient.newReader(Schema.STRING) + .topic(topic) + .startMessageId(MessageId.earliest) + .readCompacted(true) + .receiverQueueSize(10) + .create(); + + // The reader should read all 600 keys + int received = 0; + while (reader.hasMessageAvailable()) { + reader.readNext(); + received++; + } + 
Assert.assertEquals(received, keys * 2); + reader.close(); + producer.close(); + } + + @Test(timeOut = 1000 * 30) + public void testReader() throws Exception { + final String ns = "my-property/use/my-ns"; + String topic = "persistent://" + ns + "/t1"; + + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(topic) + .create(); + + producer.newMessage().key("k").value(("value").getBytes()).send(); + producer.newMessage().key("k").value(null).send(); + pulsar.getCompactor().compact(topic).get(); + + Awaitility.await() + .pollInterval(3, TimeUnit.SECONDS) + .atMost(30, TimeUnit.SECONDS).untilAsserted(() -> { + admin.topics().unload(topic); + Thread.sleep(100); + Assert.assertTrue(admin.topics().getInternalStats(topic).lastConfirmedEntry.endsWith("-1")); + }); + // Make sure the last confirm entry is -1, then get last message id from compact ledger + PersistentTopicInternalStats internalStats = admin.topics().getInternalStats(topic); + Assert.assertTrue(internalStats.lastConfirmedEntry.endsWith("-1")); + // Because the latest value of the key `k` is null, so there is no data in compact ledger. 
+ Assert.assertEquals(internalStats.compactedLedger.size, 0); + + @Cleanup + Reader reader = pulsarClient.newReader() + .topic(topic) + .startMessageIdInclusive() + .startMessageId(MessageId.earliest) + .readCompacted(true) + .create(); + Assert.assertFalse(reader.hasMessageAvailable()); + } + + @Test + public void testHasMessageAvailableWithNullValueMessage() throws Exception { + String topic = "persistent://my-property/use/my-ns/testHasMessageAvailable-" + + UUID.randomUUID(); + final int numMessages = 10; + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .blockIfQueueFull(true) + .enableBatching(false) + .create(); + CompletableFuture lastMessage = null; + for (int i = 0; i < numMessages; ++i) { + lastMessage = producer.newMessage().key(i + "").value(String.format("msg [%d]", i)).sendAsync(); + } + + for (int i = numMessages / 2; i < numMessages; ++i) { + lastMessage = producer.newMessage().key(i + "").value(null).sendAsync(); + } + producer.flush(); + lastMessage.join(); + admin.topics().triggerCompaction(topic); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topic); + Assert.assertNotEquals(stats.compactedLedger.ledgerId, -1); + Assert.assertEquals(stats.compactedLedger.entries, numMessages / 2); + Assert.assertEquals(admin.topics().getStats(topic) + .getSubscriptions().get(COMPACTION_SUBSCRIPTION).getConsumers().size(), 0); + Assert.assertEquals(stats.lastConfirmedEntry, stats.cursors.get(COMPACTION_SUBSCRIPTION).markDeletePosition); + }); + + @Cleanup + Reader reader = pulsarClient.newReader() + .topic(topic) + .startMessageIdInclusive() + .startMessageId(MessageId.earliest) + .readCompacted(true) + .create(); + for (int i = numMessages / 2; i < numMessages; ++i) { + reader.readNext(); + } + Assert.assertFalse(reader.hasMessageAvailable()); + Assert.assertNull(reader.readNext(3, TimeUnit.SECONDS)); + } + + @Test + public void 
testReadCompleteMessagesDuringTopicUnloading() throws Exception { + String topic = "persistent://my-property/use/my-ns/testReadCompleteMessagesDuringTopicUnloading-" + + UUID.randomUUID(); + final int numMessages = 1000; + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .blockIfQueueFull(true) + .enableBatching(false) + .create(); + CompletableFuture lastMessage = null; + for (int i = 0; i < numMessages; ++i) { + lastMessage = producer.newMessage().key(i + "").value(String.format("msg [%d]", i)).sendAsync(); + } + producer.flush(); + lastMessage.join(); + admin.topics().triggerCompaction(topic); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topic); + Assert.assertNotEquals(stats.compactedLedger.ledgerId, -1); + Assert.assertEquals(stats.compactedLedger.entries, numMessages); + Assert.assertEquals(admin.topics().getStats(topic) + .getSubscriptions().get(COMPACTION_SUBSCRIPTION).getConsumers().size(), 0); + Assert.assertEquals(stats.lastConfirmedEntry, stats.cursors.get(COMPACTION_SUBSCRIPTION).markDeletePosition); + }); + // Unload the topic to make sure the original ledger been deleted. + admin.topics().unload(topic); + // Produce more messages to the original topic + for (int i = 0; i < numMessages; ++i) { + lastMessage = producer.newMessage().key(i + numMessages + "").value(String.format("msg [%d]", i + numMessages)).sendAsync(); + } + producer.flush(); + lastMessage.join(); + // For now the topic has 1000 messages in the compacted ledger and 1000 messages in the original topic. + @Cleanup + Reader reader = pulsarClient.newReader(Schema.STRING) + .topic(topic) + .startMessageIdInclusive() + .startMessageId(MessageId.earliest) + .readCompacted(true) + .create(); + + // Unloading the topic during reading the data to make sure the reader will not miss any messages. 
+ for (int i = 0; i < numMessages / 2; ++i) { + Assert.assertEquals(reader.readNext().getValue(), String.format("msg [%d]", i)); + } + admin.topics().unload(topic); + for (int i = 0; i < numMessages / 2; ++i) { + Assert.assertEquals(reader.readNext().getValue(), String.format("msg [%d]", i + numMessages / 2)); + } + admin.topics().unload(topic); + for (int i = 0; i < numMessages; ++i) { + Assert.assertEquals(reader.readNext().getValue(), String.format("msg [%d]", i + numMessages)); + } + } + + @Test + public void testReadCompactedLatestMessageWithInclusive() throws Exception { + String topic = "persistent://my-property/use/my-ns/testLedgerRollover-" + + UUID.randomUUID(); + final int numMessages = 1; + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.STRING) + .topic(topic) + .blockIfQueueFull(true) + .enableBatching(false) + .create(); + + CompletableFuture lastMessage = null; + for (int i = 0; i < numMessages; ++i) { + lastMessage = producer.newMessage().key(i + "").value(String.format("msg [%d]", i)).sendAsync(); + } + producer.flush(); + lastMessage.join(); + admin.topics().unload(topic); + admin.topics().triggerCompaction(topic); + Awaitility.await().untilAsserted(() -> { + PersistentTopicInternalStats stats = admin.topics().getInternalStats(topic); + Assert.assertNotEquals(stats.compactedLedger.ledgerId, -1); + Assert.assertEquals(stats.compactedLedger.entries, numMessages); + Assert.assertEquals(admin.topics().getStats(topic) + .getSubscriptions().get(COMPACTION_SUBSCRIPTION).getConsumers().size(), 0); + Assert.assertEquals(stats.lastConfirmedEntry, stats.cursors.get(COMPACTION_SUBSCRIPTION).markDeletePosition); + }); + + Awaitility.await() + .pollInterval(3, TimeUnit.SECONDS) + .atMost(30, TimeUnit.SECONDS).untilAsserted(() -> { + admin.topics().unload(topic); + Assert.assertTrue(admin.topics().getInternalStats(topic).lastConfirmedEntry.endsWith("-1")); + }); + + @Cleanup + Reader reader = pulsarClient.newReader() + .topic(topic) + 
.startMessageIdInclusive() + .startMessageId(MessageId.latest) + .readCompacted(true) + .create(); + + Assert.assertTrue(reader.hasMessageAvailable()); + Assert.assertEquals(reader.readNext().getMessageId(), lastMessage.get()); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionTest.java index c9e0d95f7ffdc..ab9e62c6face5 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/compaction/CompactionTest.java @@ -28,6 +28,7 @@ import com.google.common.collect.Sets; import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; @@ -43,31 +44,30 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; + +import io.netty.buffer.ByteBuf; +import lombok.Cleanup; import org.apache.bookkeeper.client.BookKeeper; import org.apache.bookkeeper.client.api.OpenBuilder; +import org.apache.bookkeeper.mledger.AsyncCallbacks; +import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.ManagedLedgerInfo; +import org.apache.bookkeeper.mledger.Position; import org.apache.commons.lang3.tuple.Pair; +import org.apache.pulsar.broker.BrokerTestUtil; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.broker.service.Topic; import org.apache.pulsar.broker.service.persistent.PersistentTopic; -import org.apache.pulsar.client.api.CompressionType; -import org.apache.pulsar.client.api.Consumer; -import org.apache.pulsar.client.api.CryptoKeyReader; -import org.apache.pulsar.client.api.EncryptionKeyInfo; -import org.apache.pulsar.client.api.Message; -import org.apache.pulsar.client.api.MessageId; 
-import org.apache.pulsar.client.api.MessageRoutingMode; -import org.apache.pulsar.client.api.Producer; -import org.apache.pulsar.client.api.ProducerBuilder; -import org.apache.pulsar.client.api.PulsarClientException; -import org.apache.pulsar.client.api.Reader; -import org.apache.pulsar.client.api.SubscriptionInitialPosition; +import org.apache.pulsar.client.api.*; import org.apache.pulsar.client.impl.BatchMessageIdImpl; +import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.ClusterDataImpl; import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats; import org.apache.pulsar.common.policies.data.RetentionPolicies; import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.protocol.Markers; import org.apache.pulsar.common.util.FutureUtil; +import org.awaitility.Awaitility; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -1656,4 +1656,63 @@ public void testReadUnCompacted(boolean batchEnabled) throws PulsarClientExcepti assertNull(none); } } + + @Test(timeOut = 60000) + public void testCompactionWithMarker() throws Exception { + String namespace = "my-property/use/my-ns"; + final TopicName dest = TopicName.get( + BrokerTestUtil.newUniqueName("persistent://" + namespace + "/testWriteMarker")); + admin.topics().createNonPartitionedTopic(dest.toString()); + @Cleanup + Consumer consumer = pulsarClient.newConsumer() + .topic(dest.toString()) + .subscriptionName("test-compaction-sub") + .subscriptionType(SubscriptionType.Exclusive) + .readCompacted(true) + .subscriptionInitialPosition(SubscriptionInitialPosition.Latest) + .subscribe(); + @Cleanup + Producer producer = pulsarClient.newProducer() + .topic(dest.toString()) + .enableBatching(false) + .messageRoutingMode(MessageRoutingMode.SinglePartition) + .create(); + 
producer.send("msg-1".getBytes(StandardCharsets.UTF_8)); + Optional topic = pulsar.getBrokerService().getTopic(dest.toString(), true).join(); + Assert.assertTrue(topic.isPresent()); + PersistentTopic persistentTopic = (PersistentTopic) topic.get(); + Random random = new Random(); + for (int i = 0; i < 100; i++) { + int rad = random.nextInt(3); + ByteBuf marker; + if (rad == 0) { + marker = Markers.newTxnCommitMarker(-1L, 0, i); + } else if (rad == 1) { + marker = Markers.newTxnAbortMarker(-1L, 0, i); + } else { + marker = Markers.newReplicatedSubscriptionsSnapshotRequest(UUID.randomUUID().toString(), "r1"); + } + persistentTopic.getManagedLedger().asyncAddEntry(marker, new AsyncCallbacks.AddEntryCallback() { + @Override + public void addComplete(Position position, ByteBuf entryData, Object ctx) { + // + } + + @Override + public void addFailed(ManagedLedgerException exception, Object ctx) { + // + } + }, null); + marker.release(); + } + producer.send("msg-2".getBytes(StandardCharsets.UTF_8)); + admin.topics().triggerCompaction(dest.toString()); + Awaitility.await() + .atMost(50, TimeUnit.SECONDS) + .pollInterval(1, TimeUnit.SECONDS) + .untilAsserted(() -> { + long ledgerId = admin.topics().getInternalStats(dest.toString()).compactedLedger.ledgerId; + Assert.assertNotEquals(ledgerId, -1L); + }); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionE2ESecurityTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionE2ESecurityTest.java index e7173a23e39b5..c399cd8b73419 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionE2ESecurityTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionE2ESecurityTest.java @@ -125,7 +125,7 @@ void setup(Method method) throws Exception { bkEnsemble = new LocalBookkeeperEnsemble(3, 0, () -> 0); bkEnsemble.start(); - config = spy(new ServiceConfiguration()); + config = 
spy(ServiceConfiguration.class); config.setClusterName("use"); Set superUsers = Sets.newHashSet(ADMIN_SUBJECT); config.setSuperUserRoles(superUsers); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionLocalRunTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionLocalRunTest.java index ed6c4aa8912ac..c48de5257b381 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionLocalRunTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionLocalRunTest.java @@ -193,7 +193,7 @@ void setup(Method method) throws Exception { bkEnsemble = new LocalBookkeeperEnsemble(3, 0, () -> 0); bkEnsemble.start(); - config = spy(new ServiceConfiguration()); + config = spy(ServiceConfiguration.class); config.setClusterName(CLUSTER); Set superUsers = Sets.newHashSet("superUser", "admin"); config.setSuperUserRoles(superUsers); @@ -304,7 +304,7 @@ void shutdown() throws Exception { } } - private WorkerConfig createWorkerConfig(ServiceConfiguration config) { + protected WorkerConfig createWorkerConfig(ServiceConfiguration config) { System.setProperty(JAVA_INSTANCE_JAR_PROPERTY, FutureUtil.class.getProtectionDomain().getCodeSource().getLocation().getPath()); @@ -560,7 +560,7 @@ private void testE2EPulsarFunctionLocalRun(String jarFilePathUrl, int parallelis } } - private void testE2EPulsarFunctionLocalRun(String jarFilePathUrl) throws Exception { + protected void testE2EPulsarFunctionLocalRun(String jarFilePathUrl) throws Exception { testE2EPulsarFunctionLocalRun(jarFilePathUrl, 1); } @@ -1133,7 +1133,7 @@ private void runWithNarClassLoader(Assert.ThrowingRunnable throwingRunnable) thr } } - private void runWithPulsarFunctionsClassLoader(Assert.ThrowingRunnable throwingRunnable) throws Throwable { + protected void runWithPulsarFunctionsClassLoader(Assert.ThrowingRunnable throwingRunnable) throws Throwable { ClassLoader originalClassLoader = 
Thread.currentThread().getContextClassLoader(); try { Thread.currentThread().setContextClassLoader(pulsarApiExamplesClassLoader); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionMetadataStoreTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionMetadataStoreTest.java new file mode 100644 index 0000000000000..715837f4e2603 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionMetadataStoreTest.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.functions.worker; + +import static org.apache.commons.lang3.StringUtils.isNotBlank; +import static org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest.retryStrategically; +import static org.apache.pulsar.functions.utils.functioncache.FunctionCacheEntry.JAVA_INSTANCE_JAR_PROPERTY; +import static org.mockito.Mockito.spy; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import java.io.File; +import java.io.IOException; +import java.lang.reflect.Method; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.PulsarService; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.ServiceConfigurationUtils; +import org.apache.pulsar.broker.authentication.AuthenticationProviderTls; +import org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider; +import org.apache.pulsar.broker.loadbalance.impl.SimpleLoadManagerImpl; +import org.apache.pulsar.client.admin.BrokerStats; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.Authentication; +import org.apache.pulsar.client.api.ClientBuilder; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; +import 
org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.schema.GenericRecord; +import org.apache.pulsar.client.api.schema.SchemaDefinition; +import org.apache.pulsar.client.impl.auth.AuthenticationTls; +import org.apache.pulsar.common.functions.ConsumerConfig; +import org.apache.pulsar.common.functions.FunctionConfig; +import org.apache.pulsar.common.functions.Utils; +import org.apache.pulsar.common.io.SinkConfig; +import org.apache.pulsar.common.io.SourceConfig; +import org.apache.pulsar.common.nar.NarClassLoader; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.ConsumerStats; +import org.apache.pulsar.common.policies.data.PublisherStats; +import org.apache.pulsar.common.policies.data.SubscriptionStats; +import org.apache.pulsar.common.policies.data.TenantInfo; +import org.apache.pulsar.common.policies.data.TopicStats; +import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.ObjectMapperFactory; +import org.apache.pulsar.functions.LocalRunner; +import org.apache.pulsar.functions.api.Record; +import org.apache.pulsar.functions.instance.state.PulsarMetadataStateStoreProviderImpl; +import org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactory; +import org.apache.pulsar.functions.runtime.thread.ThreadRuntimeFactoryConfig; +import org.apache.pulsar.functions.utils.FunctionCommon; +import org.apache.pulsar.io.core.Sink; +import org.apache.pulsar.io.core.SinkContext; +import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.DataProvider; +import 
org.testng.annotations.Test; + +/** + * Test Pulsar sink on function + */ +@Slf4j +@Test +public class PulsarFunctionMetadataStoreTest extends PulsarFunctionLocalRunTest { + + + protected WorkerConfig createWorkerConfig(ServiceConfiguration config) { + WorkerConfig wc = super.createWorkerConfig(config); + wc.setStateStorageProviderImplementation(PulsarMetadataStateStoreProviderImpl.class.getName()); + wc.setStateStorageServiceUrl("memory://local"); + return wc; + } + + @Test + public void testE2EPulsarFunctionLocalRun() throws Throwable { + runWithPulsarFunctionsClassLoader(() -> testE2EPulsarFunctionLocalRun(null)); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionPublishTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionPublishTest.java index 9f5e525d6b715..d985241e2903d 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionPublishTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarFunctionPublishTest.java @@ -119,7 +119,7 @@ void setup(Method method) throws Exception { bkEnsemble = new LocalBookkeeperEnsemble(3, 0, () -> 0); bkEnsemble.start(); - config = spy(new ServiceConfiguration()); + config = spy(ServiceConfiguration.class); config.setClusterName("use"); Set superUsers = Sets.newHashSet("superUser", "admin"); config.setSuperUserRoles(superUsers); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarWorkerAssignmentTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarWorkerAssignmentTest.java index 019ca0bd577f4..1b66099c14c55 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarWorkerAssignmentTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/functions/worker/PulsarWorkerAssignmentTest.java @@ -89,7 +89,7 @@ void setup(Method method) throws Exception { bkEnsemble = new LocalBookkeeperEnsemble(3, 0, () -> 0); 
bkEnsemble.start(); - config = spy(new ServiceConfiguration()); + config = spy(ServiceConfiguration.class); config.setClusterName("use"); final Set superUsers = Sets.newHashSet("superUser", "admin"); config.setSuperUserRoles(superUsers); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/io/AbstractPulsarE2ETest.java b/pulsar-broker/src/test/java/org/apache/pulsar/io/AbstractPulsarE2ETest.java index 1df4be1c71d0d..93cdd79ad01f4 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/io/AbstractPulsarE2ETest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/io/AbstractPulsarE2ETest.java @@ -114,7 +114,7 @@ public void setup(Method method) throws Exception { bkEnsemble = new LocalBookkeeperEnsemble(3, 0, () -> 0); bkEnsemble.start(); - config = spy(new ServiceConfiguration()); + config = spy(ServiceConfiguration.class); config.setClusterName("use"); Set superUsers = Sets.newHashSet("superUser", "admin"); config.setSuperUserRoles(superUsers); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionAdminTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionAdminTest.java index ddb1fb3d736d3..ffab0b78a2700 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionAdminTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionAdminTest.java @@ -96,7 +96,7 @@ void setup(Method method) throws Exception { bkEnsemble = new LocalBookkeeperEnsemble(3, 0, () -> 0); bkEnsemble.start(); - config = spy(new ServiceConfiguration()); + config = spy(ServiceConfiguration.class); config.setClusterName("use"); Set superUsers = Sets.newHashSet("superUser", "admin"); config.setSuperUserRoles(superUsers); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionTlsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionTlsTest.java index 5867bb78404d6..15ee27dc3a56c 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionTlsTest.java +++ 
b/pulsar-broker/src/test/java/org/apache/pulsar/io/PulsarFunctionTlsTest.java @@ -105,7 +105,7 @@ void setup(Method method) throws Exception { bkEnsemble = new LocalBookkeeperEnsemble(3, 0, () -> 0); bkEnsemble.start(); - config = spy(new ServiceConfiguration()); + config = spy(ServiceConfiguration.class); config.setBrokerShutdownTimeoutMs(0L); config.setClusterName("use"); Set superUsers = Sets.newHashSet("superUser", "admin"); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/schema/SchemaTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/schema/SchemaTest.java index dab9d08ac143e..1a0921a8aea5f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/schema/SchemaTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/schema/SchemaTest.java @@ -20,7 +20,6 @@ import static org.apache.pulsar.common.naming.TopicName.PUBLIC_TENANT; import static org.apache.pulsar.schema.compatibility.SchemaCompatibilityCheckTest.randomName; - import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; @@ -28,13 +27,10 @@ import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; import static org.testng.internal.junit.ArrayAsserts.assertArrayEquals; - -import org.apache.avro.Schema.Parser; - import com.fasterxml.jackson.databind.JsonNode; import com.google.common.collect.Sets; - import java.io.ByteArrayInputStream; +import java.io.Serializable; import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.HashMap; @@ -44,9 +40,10 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; - import lombok.Cleanup; +import lombok.EqualsAndHashCode; import lombok.extern.slf4j.Slf4j; +import org.apache.avro.Schema.Parser; import org.apache.bookkeeper.client.BKException; import org.apache.bookkeeper.client.BookKeeper; import 
org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; @@ -59,7 +56,9 @@ import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SchemaSerializationException; import org.apache.pulsar.client.api.SubscriptionInitialPosition; +import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.api.TypedMessageBuilder; import org.apache.pulsar.client.api.schema.GenericRecord; import org.apache.pulsar.client.api.schema.SchemaDefinition; @@ -658,9 +657,11 @@ public void testNullKeyValueProperty() throws PulsarAdminException, PulsarClient final Map map = new HashMap<>(); map.put("key", null); map.put(null, "value"); // null key is not allowed for JSON, it's only for test here - ((SchemaInfoImpl)Schema.INT32.getSchemaInfo()).setProperties(map); - final Consumer consumer = pulsarClient.newConsumer(Schema.INT32).topic(topic) + final Schema integerSchema = Schema.JSON(Integer.class); + ((SchemaInfoImpl) integerSchema.getSchemaInfo()).setProperties(map); + + final Consumer consumer = pulsarClient.newConsumer(integerSchema).topic(topic) .subscriptionName("sub") .subscribe(); consumer.close(); @@ -724,6 +725,96 @@ public void testDeleteTopicAndSchema() throws Exception { } } + @Test + public void testDeleteTopicAndSchemaForV1() throws Exception { + final String tenant = PUBLIC_TENANT; + final String cluster = CLUSTER_NAME; + final String namespace = "test-namespace-" + randomName(16); + final String topicOne = "not-partitioned-topic"; + final String topic2 = "persistent://" + tenant + "/" + cluster + "/" + namespace + "/partitioned-topic"; + + // persistent, not-partitioned v1/topic + final String topic1 = TopicName.get( + TopicDomain.persistent.value(), + tenant, + cluster, + namespace, + topicOne).toString(); + + // persistent, partitioned v1/topic + admin.topics().createPartitionedTopic(topic2, 1); + + @Cleanup + Producer 
p1_1 = pulsarClient.newProducer(Schema.JSON(Schemas.PersonOne.class)) + .topic(topic1) + .create(); + + @Cleanup + Producer p1_2 = pulsarClient.newProducer(Schema.JSON(Schemas.PersonThree.class)) + .topic(topic1) + .create(); + @Cleanup + Producer p2_1 = pulsarClient.newProducer(Schema.JSON(Schemas.PersonThree.class)) + .topic(topic2) + .create(); + + List> schemaFutures1 = + this.getPulsar().getSchemaRegistryService().getAllSchemas(TopicName.get(topic1).getSchemaName()).get(); + FutureUtil.waitForAll(schemaFutures1).get(); + List schemas1 = schemaFutures1.stream().map(future -> { + try { + return future.get(); + } catch (Exception e) { + return null; + } + }).collect(Collectors.toList()); + assertEquals(schemas1.size(), 2); + for (SchemaRegistry.SchemaAndMetadata schema : schemas1) { + assertNotNull(schema); + } + + List> schemaFutures2 = + this.getPulsar().getSchemaRegistryService().getAllSchemas(TopicName.get(topic2).getSchemaName()).get(); + FutureUtil.waitForAll(schemaFutures2).get(); + List schemas2 = schemaFutures2.stream().map(future -> { + try { + return future.get(); + } catch (Exception e) { + return null; + } + }).collect(Collectors.toList()); + assertEquals(schemas2.size(), 1); + for (SchemaRegistry.SchemaAndMetadata schema : schemas2) { + assertNotNull(schema); + } + + // not-force and not-delete-schema when delete topic + try { + admin.topics().delete(topic1, false, false); + fail(); + } catch (Exception e) { + assertTrue(e.getMessage().startsWith("Topic has active producers/subscriptions")); + } + assertEquals(this.getPulsar().getSchemaRegistryService() + .trimDeletedSchemaAndGetList(TopicName.get(topic1).getSchemaName()).get().size(), 2); + try { + admin.topics().deletePartitionedTopic(topic2, false, false); + fail(); + } catch (Exception e) { + assertTrue(e.getMessage().startsWith("Topic has active producers/subscriptions")); + } + assertEquals(this.getPulsar().getSchemaRegistryService() + 
.trimDeletedSchemaAndGetList(TopicName.get(topic2).getSchemaName()).get().size(), 1); + + // force and delete-schema when delete topic + admin.topics().delete(topic1, true, true); + assertEquals(this.getPulsar().getSchemaRegistryService() + .trimDeletedSchemaAndGetList(TopicName.get(topic1).getSchemaName()).get().size(), 0); + admin.topics().deletePartitionedTopic(topic2, true, true); + assertEquals(this.getPulsar().getSchemaRegistryService() + .trimDeletedSchemaAndGetList(TopicName.get(topic2).getSchemaName()).get().size(), 0); + } + @Test public void testProducerMultipleSchemaMessages() throws Exception { final String tenant = PUBLIC_TENANT; @@ -757,6 +848,9 @@ public void testProducerMultipleSchemaMessages() throws Exception { producer.newMessage(Schema.NATIVE_AVRO(personThreeSchemaAvroNative)).value(content).send(); List allSchemas = admin.schemas().getAllSchemas(topic); + allSchemas.forEach(schemaInfo -> { + ((SchemaInfoImpl)schemaInfo).setTimestamp(0); + }); Assert.assertEquals(allSchemas.size(), 5); Assert.assertEquals(allSchemas.get(0), Schema.STRING.getSchemaInfo()); Assert.assertEquals(allSchemas.get(1), Schema.JSON(Schemas.PersonThree.class).getSchemaInfo()); @@ -800,6 +894,7 @@ public void testNullKey() throws Exception { assertEquals("foo", message.getValue()); } + @Test public void testConsumeMultipleSchemaMessages() throws Exception { final String namespace = "test-namespace-" + randomName(16); String ns = PUBLIC_TENANT + "/" + namespace; @@ -971,4 +1066,74 @@ private void checkSchemaForAutoSchema(Message message) { } } + @Test + public void testAvroSchemaWithHttpLookup() throws Exception { + stopBroker(); + isTcpLookup = false; + setup(); + testIncompatibleSchema(); + } + + @Test + public void testAvroSchemaWithTcpLookup() throws Exception { + stopBroker(); + isTcpLookup = true; + setup(); + testIncompatibleSchema(); + } + + private void testIncompatibleSchema() throws Exception { + final String namespace = "test-namespace-" + randomName(16); + 
String ns = PUBLIC_TENANT + "/" + namespace; + admin.namespaces().createNamespace(ns, Sets.newHashSet(CLUSTER_NAME)); + + final String autoProducerTopic = getTopicName(ns, "testEmptySchema"); + + @Cleanup + Consumer consumer = pulsarClient + .newConsumer(Schema.AVRO(User.class)) + .topic(autoProducerTopic) + .subscriptionType(SubscriptionType.Shared) + .subscriptionName("sub-1") + .subscribe(); + + @Cleanup + Producer userProducer = pulsarClient + .newProducer(Schema.AVRO(User.class)) + .topic(autoProducerTopic) + .enableBatching(false) + .create(); + + @Cleanup + Producer producer = pulsarClient + .newProducer() + .topic(autoProducerTopic) + .enableBatching(false) + .create(); + + User test = new User("test"); + userProducer.send(test); + producer.send("test".getBytes(StandardCharsets.UTF_8)); + Message message1 = consumer.receive(); + Assert.assertEquals(test, message1.getValue()); + Message message2 = consumer.receive(); + try { + message2.getValue(); + } catch (SchemaSerializationException e) { + final String schemaString = + new String(Schema.AVRO(User.class).getSchemaInfo().getSchema(), StandardCharsets.UTF_8); + Assert.assertTrue(e.getMessage().contains(schemaString)); + Assert.assertTrue(e.getMessage().contains("payload (4 bytes)")); + } + } + + @EqualsAndHashCode + static class User implements Serializable { + private String name; + public User() {} + public User(String name) { + this.name = name; + } + } + } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaCompatibilityCheckTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaCompatibilityCheckTest.java index 8def5dc3f06e6..9123ea33066dd 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaCompatibilityCheckTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaCompatibilityCheckTest.java @@ -25,6 +25,7 @@ import com.google.common.collect.Sets; import 
java.util.Collections; import java.util.concurrent.ThreadLocalRandom; +import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; import org.apache.pulsar.client.api.Consumer; @@ -35,15 +36,13 @@ import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SchemaSerializationException; import org.apache.pulsar.client.api.schema.SchemaDefinition; -import org.apache.pulsar.client.impl.schema.SchemaInfoImpl; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicDomain; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.ClusterData; -import org.apache.pulsar.common.policies.data.ClusterDataImpl; +import org.apache.pulsar.common.policies.data.Policies; import org.apache.pulsar.common.policies.data.SchemaCompatibilityStrategy; import org.apache.pulsar.common.policies.data.TenantInfo; -import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.schema.SchemaInfo; import org.apache.pulsar.common.schema.SchemaType; import org.apache.pulsar.schema.Schemas; @@ -221,6 +220,91 @@ public void testConsumerCompatibilityReadAllCheckTest(SchemaCompatibilityStrateg } } + @Test(dataProvider = "AllCheckSchemaCompatibilityStrategy") + public void testBrokerAllowAutoUpdateSchemaDisabled(SchemaCompatibilityStrategy schemaCompatibilityStrategy) + throws Exception { + + final String tenant = PUBLIC_TENANT; + final String topic = "test-consumer-compatibility"; + String namespace = "test-namespace-" + randomName(16); + String fqtn = TopicName.get( + TopicDomain.persistent.value(), + tenant, + namespace, + topic + ).toString(); + + NamespaceName namespaceName = NamespaceName.get(tenant, namespace); + + admin.namespaces().createNamespace( + tenant + "/" + namespace, + Sets.newHashSet(CLUSTER_NAME) + ); + + 
assertEquals(admin.namespaces().getSchemaCompatibilityStrategy(namespaceName.toString()), + SchemaCompatibilityStrategy.UNDEFINED); + + admin.namespaces().setSchemaCompatibilityStrategy(namespaceName.toString(), schemaCompatibilityStrategy); + admin.schemas().createSchema(fqtn, Schema.AVRO(Schemas.PersonOne.class).getSchemaInfo()); + + + pulsar.getConfig().setAllowAutoUpdateSchemaEnabled(false); + + ProducerBuilder producerThreeBuilder = pulsarClient + .newProducer(Schema.AVRO(SchemaDefinition.builder().withAlwaysAllowNull + (false).withSupportSchemaVersioning(true). + withPojo(Schemas.PersonTwo.class).build())) + .topic(fqtn); + try { + producerThreeBuilder.create(); + } catch (Exception e) { + Assert.assertTrue(e.getMessage().contains("Schema not found and schema auto updating is disabled.")); + } + + pulsar.getConfig().setAllowAutoUpdateSchemaEnabled(true); + Policies policies = admin.namespaces().getPolicies(namespaceName.toString()); + Assert.assertTrue(policies.is_allow_auto_update_schema); + + ConsumerBuilder comsumerBuilder = pulsarClient.newConsumer(Schema.AVRO( + SchemaDefinition.builder().withAlwaysAllowNull + (false).withSupportSchemaVersioning(true). 
+ withPojo(Schemas.PersonTwo.class).build())) + .subscriptionName("test") + .topic(fqtn); + + Producer producer = producerThreeBuilder.create(); + Consumer consumerTwo = comsumerBuilder.subscribe(); + + producer.send(new Schemas.PersonTwo(2, "Lucy")); + Message message = consumerTwo.receive(); + + Schemas.PersonTwo personTwo = message.getValue(); + consumerTwo.acknowledge(message); + + assertEquals(personTwo.getId(), 2); + assertEquals(personTwo.getName(), "Lucy"); + + producer.close(); + consumerTwo.close(); + + pulsar.getConfig().setAllowAutoUpdateSchemaEnabled(false); + + producer = producerThreeBuilder.create(); + consumerTwo = comsumerBuilder.subscribe(); + + producer.send(new Schemas.PersonTwo(2, "Lucy")); + message = consumerTwo.receive(); + + personTwo = message.getValue(); + consumerTwo.acknowledge(message); + + assertEquals(personTwo.getId(), 2); + assertEquals(personTwo.getName(), "Lucy"); + + consumerTwo.close(); + producer.close(); + } + @Test(dataProvider = "AllCheckSchemaCompatibilityStrategy") public void testIsAutoUpdateSchema(SchemaCompatibilityStrategy schemaCompatibilityStrategy) throws Exception { final String tenant = PUBLIC_TENANT; @@ -242,7 +326,7 @@ public void testIsAutoUpdateSchema(SchemaCompatibilityStrategy schemaCompatibili ); assertEquals(admin.namespaces().getSchemaCompatibilityStrategy(namespaceName.toString()), - SchemaCompatibilityStrategy.FULL); + SchemaCompatibilityStrategy.UNDEFINED); admin.namespaces().setSchemaCompatibilityStrategy(namespaceName.toString(), schemaCompatibilityStrategy); admin.schemas().createSchema(fqtn, Schema.AVRO(Schemas.PersonOne.class).getSchemaInfo()); @@ -321,10 +405,10 @@ public void testSchemaComparison() throws Exception { ); assertEquals(admin.namespaces().getSchemaCompatibilityStrategy(namespaceName.toString()), - SchemaCompatibilityStrategy.FULL); + SchemaCompatibilityStrategy.UNDEFINED); byte[] changeSchemaBytes = (new String(Schema.AVRO(Schemas.PersonOne.class) .getSchemaInfo().getSchema(), 
UTF_8) + "/n /n /n").getBytes(); - SchemaInfo schemaInfo = SchemaInfoImpl.builder().type(SchemaType.AVRO).schema(changeSchemaBytes).build(); + SchemaInfo schemaInfo = SchemaInfo.builder().type(SchemaType.AVRO).schema(changeSchemaBytes).build(); admin.schemas().createSchema(fqtn, schemaInfo); admin.namespaces().setIsAllowAutoUpdateSchema(namespaceName.toString(), false); @@ -395,9 +479,58 @@ public void testProducerSendWithOldSchemaAndConsumerCanRead(SchemaCompatibilityS consumerOne.close(); producerOne.close(); + } + + @Test + public void testSchemaLedgerAutoRelease() throws Exception { + String namespaceName = PUBLIC_TENANT + "/default"; + String topicName = "persistent://" + namespaceName + "/tp"; + admin.namespaces().createNamespace(namespaceName, Sets.newHashSet(CLUSTER_NAME)); + admin.namespaces().setSchemaCompatibilityStrategy(namespaceName, SchemaCompatibilityStrategy.ALWAYS_COMPATIBLE); + // Update schema 100 times. + for (int i = 0; i < 100; i++){ + Schema schema = Schema.JSON(SchemaDefinition.builder() + .withJsonDef(String.format("{\"type\": \"record\",\"name\": " + + "\"Test_Pojo\",\"namespace\": \"org.apache.pulsar.schema.compatibility\"," + + "\"fields\": [{\"name\": \"prop_%s\",\"type\": " + + "[\"null\", \"string\"],\"default\": null}]}", i)) + .build()); + Producer producer = pulsarClient + .newProducer(schema) + .topic(topicName) + .create(); + producer.close(); + } + // The other ledgers are about 5. 
+ Assert.assertTrue(mockBookKeeper.getLedgerMap().values().stream() + .filter(ledger -> !ledger.isFenced()) + .collect(Collectors.toList()).size() < 20); + admin.topics().delete(topicName, true); } + @Test + public void testAutoProduceSchemaAlwaysCompatible() throws Exception { + final String tenant = PUBLIC_TENANT; + final String topic = "topic" + randomName(16); + + String namespace = "test-namespace-" + randomName(16); + String topicName = TopicName.get( + TopicDomain.persistent.value(), tenant, namespace, topic).toString(); + NamespaceName namespaceName = NamespaceName.get(tenant, namespace); + admin.namespaces().createNamespace(tenant + "/" + namespace, Sets.newHashSet(CLUSTER_NAME)); + + // set ALWAYS_COMPATIBLE + admin.namespaces().setSchemaCompatibilityStrategy(namespaceName.toString(), SchemaCompatibilityStrategy.ALWAYS_COMPATIBLE); + + Producer producer = pulsarClient.newProducer(Schema.AUTO_PRODUCE_BYTES()).topic(topicName).create(); + // should not fail + Consumer consumer = pulsarClient.newConsumer(Schema.STRING).subscriptionName("my-sub").topic(topicName).subscribe(); + + producer.close(); + consumer.close(); + } + @Test(dataProvider = "CanReadLastSchemaCompatibilityStrategy") public void testConsumerWithNotCompatibilitySchema(SchemaCompatibilityStrategy schemaCompatibilityStrategy) throws Exception { final String tenant = PUBLIC_TENANT; diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaTypeCompatibilityCheckTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaTypeCompatibilityCheckTest.java index 1367d4dbccd80..3fb047a2215d0 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaTypeCompatibilityCheckTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/schema/compatibility/SchemaTypeCompatibilityCheckTest.java @@ -37,8 +37,8 @@ import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.schema.SchemaInfo; 
import org.apache.pulsar.schema.Schemas; -import org.testng.annotations.AfterClass; -import org.testng.annotations.BeforeClass; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; import java.util.Collections; @@ -57,7 +57,7 @@ public class SchemaTypeCompatibilityCheckTest extends MockedPulsarServiceBaseTes private static final String namespace = "test-namespace"; private static final String namespaceName = PUBLIC_TENANT + "/" + namespace; - @BeforeClass + @BeforeMethod @Override public void setup() throws Exception { super.internalSetup(); @@ -73,7 +73,7 @@ public void setup() throws Exception { } - @AfterClass(alwaysRun = true) + @AfterMethod(alwaysRun = true) @Override public void cleanup() throws Exception { super.internalCleanup(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/utils/ConcurrentBitmapSortedLongPairSetTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/utils/ConcurrentBitmapSortedLongPairSetTest.java new file mode 100644 index 0000000000000..3c53fc159d027 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/utils/ConcurrentBitmapSortedLongPairSetTest.java @@ -0,0 +1,211 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.utils; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; +import lombok.Cleanup; +import org.apache.pulsar.common.util.collections.ConcurrentLongPairSet; +import org.testng.annotations.Test; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +@Test(groups = "utils") +public class ConcurrentBitmapSortedLongPairSetTest { + + @Test + public void testAdd() { + ConcurrentBitmapSortedLongPairSet set = new ConcurrentBitmapSortedLongPairSet(); + int items = 10; + for (int i = 0; i < items; i++) { + set.add(1, i); + } + assertEquals(set.size(), items); + + for (int i = 0; i < items; i++) { + set.add(2, i); + } + assertEquals(set.size(), items * 2); + + for (int i = 0; i < items; i++) { + set.add(2, i); + } + assertEquals(set.size(), items * 2); + } + + @Test + public void testRemove() { + ConcurrentBitmapSortedLongPairSet set = new ConcurrentBitmapSortedLongPairSet(); + int items = 10; + for (int i = 0; i < items; i++) { + set.add(1, i); + } + + for (int i = 0; i < items / 2; i++) { + set.remove(1, i); + } + assertEquals(set.size(), items / 2); + + for (int i = 0; i < items / 2; i++) { + set.remove(2, i); + } + assertEquals(set.size(), items / 2); + + for (int i = 0; i < items / 2; i++) { + set.remove(1, i + 10000); + } + assertEquals(set.size(), items / 2); + + for (int i = 0; i < items / 2; i++) { + set.remove(1, i + items / 2); + } + assertEquals(set.size(), 0); + assertTrue(set.isEmpty()); + } + + @Test + public void testContains() { + ConcurrentBitmapSortedLongPairSet set = new ConcurrentBitmapSortedLongPairSet(); + assertFalse(set.contains(1, 1)); + + int items = 10; + 
for (int i = 0; i < items; i++) { + set.add(1, i); + } + + for (int i = 0; i < items; i++) { + assertTrue(set.contains(1, i)); + } + + assertFalse(set.contains(1, 10000)); + } + + @Test + public void testRemoveUpTo() { + ConcurrentBitmapSortedLongPairSet set = new ConcurrentBitmapSortedLongPairSet(); + set.removeUpTo(0, 1000); + set.removeUpTo(10, 10000); + assertTrue(set.isEmpty()); + + set.add(1, 0); + + int items = 10; + for (int i = 0; i < items; i++) { + set.add(1, i); + } + + set.removeUpTo(1, 5); + assertFalse(set.isEmpty()); + assertEquals(set.size(), 5); + + for (int i = 5; i < items; i++) { + assertTrue(set.contains(1, i)); + } + + set.removeUpTo(2, 0); + assertTrue(set.isEmpty()); + } + + @Test + public void testItems() { + ConcurrentBitmapSortedLongPairSet set = new ConcurrentBitmapSortedLongPairSet(); + Set items = set.items(10, ConcurrentLongPairSet.LongPair::new); + assertEquals(items.size(), 0); + for (int i = 0; i < 100; i++) { + set.add(1, i); + set.add(2, i); + set.add(5, i); + } + for (int i = 0; i < 100; i++) { + set.add(1, i + 1000); + set.add(2, i + 1000); + set.add(5, i + 1000); + } + + for (int i = 0; i < 100; i++) { + set.add(1, i + 500); + set.add(2, i + 500); + set.add(5, i + 500); + } + assertEquals(set.size(), 900); + assertFalse(set.isEmpty()); + items = set.items(10, ConcurrentLongPairSet.LongPair::new); + assertEquals(items.size(), 10); + ConcurrentLongPairSet.LongPair last = null; + for (ConcurrentLongPairSet.LongPair item : items) { + if (last != null) { + assertTrue(item.compareTo(last) > 0); + } + last = item; + } + + items = set.items(900, ConcurrentLongPairSet.LongPair::new); + assertEquals(items.size(), 900); + last = null; + for (ConcurrentLongPairSet.LongPair item : items) { + if (last != null) { + assertTrue(item.compareTo(last) > 0); + } + last = item; + } + + items = set.items(1000, ConcurrentLongPairSet.LongPair::new); + assertEquals(items.size(), 900); + } + + @Test + public void concurrentInsertions() throws Throwable 
{ + ConcurrentBitmapSortedLongPairSet set = new ConcurrentBitmapSortedLongPairSet(); + + @Cleanup("shutdownNow") + ExecutorService executor = Executors.newCachedThreadPool(); + + final int nThreads = 8; + final int N = 1000; + + List> futures = new ArrayList<>(); + for (int i = 0; i < nThreads; i++) { + final int threadIdx = i; + + futures.add(executor.submit(() -> { + Random random = new Random(); + + for (int j = 0; j < N; j++) { + int key = random.nextInt(); + // Ensure keys are unique + key -= key % (threadIdx + 1); + key = Math.abs(key); + set.add(key, key); + } + })); + } + + for (Future future : futures) { + future.get(); + } + + assertEquals(set.size(), N * nThreads); + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/utils/LogIndexLagBackOffTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/utils/LogIndexLagBackOffTest.java new file mode 100644 index 0000000000000..8d4f2c356a692 --- /dev/null +++ b/pulsar-broker/src/test/java/org/apache/pulsar/utils/LogIndexLagBackOffTest.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.utils; + +import org.apache.pulsar.broker.transaction.util.LogIndexLagBackoff; +import org.testng.Assert; +import org.testng.annotations.Test; + +@Test(groups = "utils") +public class LogIndexLagBackOffTest { + @Test + public void testGenerateNextLogIndexLag() { + LogIndexLagBackoff logIndexLagBackoff = new LogIndexLagBackoff(1, 10, 1); + Assert.assertEquals(logIndexLagBackoff.next(0), 1); + Assert.assertEquals(logIndexLagBackoff.next(6), 6); + + Assert.assertEquals(logIndexLagBackoff.next(77), 10); + + logIndexLagBackoff = new LogIndexLagBackoff(1, 10, 2); + Assert.assertEquals(logIndexLagBackoff.next(3), 9); + + try { + new LogIndexLagBackoff(-1, 2, 3); + } catch (IllegalArgumentException e) { + Assert.assertEquals(e.getMessage(), "min lag must be > 0"); + } + try { + new LogIndexLagBackoff(2, 1, 3); + } catch (IllegalArgumentException e) { + Assert.assertEquals(e.getMessage(), "maxLag should be >= minLag"); + } + try { + new LogIndexLagBackoff(1, 1, 0.2); + } catch (IllegalArgumentException e) { + Assert.assertEquals(e.getMessage(), "exponent must be > 0"); + } + + } +} diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/utils/SimpleTextOutputStreamTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/utils/SimpleTextOutputStreamTest.java index e463c8d45ad4d..1722c4bbc131e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/utils/SimpleTextOutputStreamTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/utils/SimpleTextOutputStreamTest.java @@ -118,4 +118,27 @@ public String str() { reset(); return s; } + + @Test + public void testWriteString() { + String str = "persistence://test/test/test_¬¬¬¬¬¬¬aabbcc"; + stream.write(str); + assertEquals(str, str()); + } + + + @Test + public void testWriteChar() { + String str = "persistence://test/test/test_¬¬¬¬¬¬¬aabbcc\"\n"; + for (char c : str.toCharArray()) { + stream.write(c); + } + assertEquals(str, str()); + + buf.clear(); + + 
stream.write('\n').write('"').write('A').write('Z').write('a').write('z').write(' ').write(',').write('{') + .write('}').write('[').write(']').write('¬'); + assertEquals(str(), "\n\"AZaz ,{}[]¬"); + } } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyAuthenticationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyAuthenticationTest.java index 5741a5eb0e648..b848fa76d5498 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyAuthenticationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyAuthenticationTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.websocket.proxy; import static java.util.concurrent.Executors.newFixedThreadPool; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; import static org.mockito.ArgumentMatchers.anyString; @@ -86,7 +87,7 @@ public void setup() throws Exception { config.setAnonymousUserRole("anonymousUser"); } - service = spy(new WebSocketService(config)); + service = spyWithClassAndConstructorArgs(WebSocketService.class, config); doReturn(new ZKMetadataStore(mockZooKeeperGlobal)).when(service).createMetadataStore(anyString(), anyInt()); proxyServer = new ProxyServer(config); WebSocketServiceStarter.start(proxyServer, service); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyAuthorizationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyAuthorizationTest.java index 78f33706d5355..a2758b72a4e0f 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyAuthorizationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyAuthorizationTest.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.websocket.proxy; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; 
import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doReturn; @@ -68,7 +69,7 @@ protected void setup() throws Exception { config.setClusterName("c1"); config.setWebServicePort(Optional.of(0)); config.setConfigurationStoreServers(GLOBAL_DUMMY_VALUE); - service = spy(new WebSocketService(config)); + service = spyWithClassAndConstructorArgs(WebSocketService.class, config); doReturn(new ZKMetadataStore(mockZooKeeperGlobal)).when(service).createMetadataStore(anyString(), anyInt()); service.start(); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyConfigurationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyConfigurationTest.java index 3848d6ecc1dc6..184f86340fa9a 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyConfigurationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyConfigurationTest.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.websocket.proxy; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doReturn; @@ -65,13 +66,19 @@ public Object[][] setProxyConfig() { public void configTest(int numIoThreads, int connectionsPerBroker) throws Exception { config.setWebSocketNumIoThreads(numIoThreads); config.setWebSocketConnectionsPerBroker(connectionsPerBroker); - WebSocketService service = spy(new WebSocketService(config)); + config.getProperties().setProperty("brokerClient_serviceUrl", "https://broker.com:8080"); + config.setServiceUrl("http://localhost:8080"); + config.getProperties().setProperty("brokerClient_lookupTimeoutMs", "100"); + WebSocketService service = spyWithClassAndConstructorArgs(WebSocketService.class, config); doReturn(new 
ZKMetadataStore(mockZooKeeperGlobal)).when(service).createMetadataStore(anyString(), anyInt()); service.start(); PulsarClientImpl client = (PulsarClientImpl) service.getPulsarClient(); assertEquals(client.getConfiguration().getNumIoThreads(), numIoThreads); assertEquals(client.getConfiguration().getConnectionsPerBroker(), connectionsPerBroker); + assertEquals(client.getConfiguration().getServiceUrl(), "http://localhost:8080", + "brokerClient_ configs take precedence"); + assertEquals(client.getConfiguration().getLookupTimeoutMs(), 100); service.close(); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeTest.java index 941e410cd30a5..1e74cdae78751 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeTest.java @@ -19,10 +19,10 @@ package org.apache.pulsar.websocket.proxy; import static java.util.concurrent.Executors.newFixedThreadPool; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.spy; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; @@ -34,6 +34,7 @@ import com.google.gson.reflect.TypeToken; import java.net.URI; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -77,7 +78,6 @@ import org.glassfish.jersey.logging.LoggingFeature; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import 
org.testng.annotations.Test; @@ -104,7 +104,7 @@ public void setup() throws Exception { config.setWebServicePort(Optional.of(0)); config.setClusterName("test"); config.setConfigurationStoreServers(GLOBAL_DUMMY_VALUE); - service = spy(new WebSocketService(config)); + service = spyWithClassAndConstructorArgs(WebSocketService.class, config); doReturn(new ZKMetadataStore(mockZooKeeperGlobal)).when(service).createMetadataStore(anyString(), anyInt()); proxyServer = new ProxyServer(config); WebSocketServiceStarter.start(proxyServer, service); @@ -290,14 +290,14 @@ public void unsubscribeTest() throws Exception { Future consumerFuture = consumeClient.connect(consumeSocket, consumeUri, consumeRequest); consumerFuture.get(); List subs = admin.topics().getSubscriptions(topic); - Assert.assertEquals(subs.size(), 1); - Assert.assertEquals(subs.get(0), subscription); + assertEquals(subs.size(), 1); + assertEquals(subs.get(0), subscription); // do unsubscribe consumeSocket.unsubscribe(); //wait for delete Thread.sleep(1000); subs = admin.topics().getSubscriptions(topic); - Assert.assertEquals(subs.size(), 0); + assertEquals(subs.size(), 0); } finally { stopWebSocketClient(consumeClient); } @@ -892,6 +892,106 @@ public void nackRedeliveryDelayTest() throws Exception { } } + @Test(timeOut = 20000) + public void ackBatchMessageTest() throws Exception { + final String subscription = "my-sub"; + final String topic = "my-property/my-ns/ack-batch-message" + UUID.randomUUID(); + final String consumerUri = "ws://localhost:" + proxyServer.getListenPortHTTP().get() + + "/ws/v2/consumer/persistent/" + topic + "/" + subscription; + final int messages = 10; + + WebSocketClient consumerClient = new WebSocketClient(); + SimpleConsumerSocket consumeSocket = new SimpleConsumerSocket(); + Producer producer = pulsarClient.newProducer() + .topic(topic) + .batchingMaxPublishDelay(1, TimeUnit.SECONDS) + .create(); + + try { + consumerClient.start(); + ClientUpgradeRequest consumerRequest = new 
ClientUpgradeRequest(); + Future consumerFuture = consumerClient.connect(consumeSocket, URI.create(consumerUri), consumerRequest); + + assertTrue(consumerFuture.get().isOpen()); + assertEquals(consumeSocket.getReceivedMessagesCount(), 0); + + for (int i = 0; i < messages; i++) { + producer.sendAsync(String.valueOf(i).getBytes(StandardCharsets.UTF_8)); + } + + producer.flush(); + consumeSocket.sendPermits(messages); + Awaitility.await().untilAsserted(() -> + assertEquals(consumeSocket.getReceivedMessagesCount(), messages)); + + // The message should not be acked since we only acked 1 message of the batch message + Awaitility.await().untilAsserted(() -> + assertEquals(admin.topics().getStats(topic).getSubscriptions() + .get(subscription).getMsgBacklog(), 0)); + + } finally { + stopWebSocketClient(consumerClient); + } + } + + @Test(timeOut = 20000) + public void consumeEncryptedMessages() throws Exception { + final String subscription = "my-sub"; + final String topic = "my-property/my-ns/encrypted" + UUID.randomUUID(); + final String consumerUri = "ws://localhost:" + proxyServer.getListenPortHTTP().get() + + "/ws/v2/consumer/persistent/" + topic + "/" + subscription + "?cryptoFailureAction=CONSUME"; + final int messages = 10; + + WebSocketClient consumerClient = new WebSocketClient(); + SimpleConsumerSocket consumeSocket = new SimpleConsumerSocket(); + + + final String rsaPublicKeyData = 
"data:application/x-pem-file;base64,LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF0S1d3Z3FkblRZck9DditqMU1rVApXZlNIMHdDc0haWmNhOXdBVzNxUDR1dWhsQnZuYjEwSmNGZjVaanpQOUJTWEsrdEhtSTh1b04zNjh2RXY2eWhVClJITTR5dVhxekN4enVBd2tRU28zOXJ6WDhQR0M3cWRqQ043TERKM01ucWlCSXJVc1NhRVAxd3JOc0Ixa0krbzkKRVIxZTVPL3VFUEFvdFA5MzNoSFEwSjJoTUVla0hxTDdzQmxKOThoNk5tc2ljRWFVa2FyZGswVE9YcmxrakMrYwpNZDhaYkdTY1BxSTlNMzhibW4zT0x4RlRuMXZ0aHB2blhMdkNtRzRNKzZ4dFl0RCtucGNWUFp3MWkxUjkwZk1zCjdwcFpuUmJ2OEhjL0RGZE9LVlFJZ2FtNkNEZG5OS2dXN2M3SUJNclAwQUVtMzdIVHUwTFNPalAyT0hYbHZ2bFEKR1FJREFRQUIKLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg=="; + final String rsaPrivateKeyData = "data:application/x-pem-file;base64,LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEtXd2dxZG5UWXJPQ3YrajFNa1RXZlNIMHdDc0haWmNhOXdBVzNxUDR1dWhsQnZuCmIxMEpjRmY1Wmp6UDlCU1hLK3RIbUk4dW9OMzY4dkV2NnloVVJITTR5dVhxekN4enVBd2tRU28zOXJ6WDhQR0MKN3FkakNON0xESjNNbnFpQklyVXNTYUVQMXdyTnNCMWtJK285RVIxZTVPL3VFUEFvdFA5MzNoSFEwSjJoTUVlawpIcUw3c0JsSjk4aDZObXNpY0VhVWthcmRrMFRPWHJsa2pDK2NNZDhaYkdTY1BxSTlNMzhibW4zT0x4RlRuMXZ0Cmhwdm5YTHZDbUc0TSs2eHRZdEQrbnBjVlBadzFpMVI5MGZNczdwcFpuUmJ2OEhjL0RGZE9LVlFJZ2FtNkNEZG4KTktnVzdjN0lCTXJQMEFFbTM3SFR1MExTT2pQMk9IWGx2dmxRR1FJREFRQUJBb0lCQUFhSkZBaTJDN3UzY05yZgpBc3RZOXZWRExvTEl2SEZabGtCa3RqS1pEWW1WSXNSYitoU0NWaXdWVXJXTEw2N1I2K0l2NGVnNERlVE9BeDAwCjhwbmNYS2daVHcyd0liMS9RalIvWS9SamxhQzhsa2RtUldsaTd1ZE1RQ1pWc3lodVNqVzZQajd2cjhZRTR3b2oKRmhOaWp4RUdjZjl3V3JtTUpyemRuVFdRaVhCeW8rZVR2VVE5QlBnUEdyUmpzTVptVGtMeUFWSmZmMkRmeE81YgpJV0ZEWURKY3lZQU1DSU1RdTd2eXMvSTUwb3U2aWxiMUNPNlFNNlo3S3BQZU9vVkZQd3R6Ymg4Y2Y5eE04VU5TCmo2Si9KbWRXaGdJMzRHUzNOQTY4eFRRNlBWN3pqbmhDYytpY2NtM0pLeXpHWHdhQXBBWitFb2NlLzlqNFdLbXUKNUI0emlSMENnWUVBM2wvOU9IYmwxem15VityUnhXT0lqL2kyclR2SHp3Qm5iblBKeXVlbUw1Vk1GZHBHb2RRMwp2d0h2eVFtY0VDUlZSeG1Yb2pRNFF1UFBIczNxcDZ3RUVGUENXeENoTFNUeGxVYzg1U09GSFdVMk85OWpWN3pJCjcrSk9wREsvTXN0c3g5bkhnWGR1SkYrZ2xURnRBM0xIOE9xeWx6dTJhRlBzcHJ3S3VaZjk0UThDZ1lFQXovWngKYWtFRytQRU10UDVZUzI4Y1g1WGZqc0
lYL1YyNkZzNi9zSDE2UWpVSUVkZEU1VDRmQ3Vva3hDalNpd1VjV2htbApwSEVKNVM1eHAzVllSZklTVzNqUlczcXN0SUgxdHBaaXBCNitTMHpUdUptTEpiQTNJaVdFZzJydE10N1gxdUp2CkEvYllPcWUwaE9QVHVYdVpkdFZaMG5NVEtrN0dHOE82VmtCSTdGY0NnWUVBa0RmQ21zY0pnczdKYWhsQldIbVgKekg5cHdlbStTUEtqSWMvNE5CNk4rZGdpa3gyUHAwNWhwUC9WaWhVd1lJdWZ2cy9MTm9nVllOUXJ0SGVwVW5yTgoyK1RtYkhiWmdOU3YxTGR4dDgyVWZCN3kwRnV0S3U2bGhtWEh5TmVjaG8zRmk4c2loMFYwYWlTV21ZdUhmckFICkdhaXNrRVpLbzFpaVp2UVhKSXg5TzJNQ2dZQVRCZjByOWhUWU10eXh0YzZIMy9zZGQwMUM5dGhROGdEeTB5alAKMFRxYzBkTVNKcm9EcW1JV2tvS1lldzkvYmhGQTRMVzVUQ25Xa0NBUGJIbU50RzRmZGZiWXdta0gvaGRuQTJ5MApqS2RscGZwOEdYZVVGQUdIR3gxN0ZBM3NxRnZnS1VoMGVXRWdSSFVMN3ZkUU1WRkJnSlM5M283elFNOTRmTGdQCjZjT0I4d0tCZ0ZjR1Y0R2pJMld3OWNpbGxhQzU1NE12b1NqZjhCLyswNGtYekRPaDhpWUlJek85RVVpbDFqaksKSnZ4cDRobkx6VEtXYnV4M01FV3F1ckxrWWFzNkdwS0JqdytpTk9DYXI2WWRxV0dWcU0zUlV4N1BUVWFad2tLeApVZFA2M0lmWTdpWkNJVC9RYnlIUXZJVWUyTWFpVm5IK3VseGRrSzZZNWU3Z3hjYmNrSUg0Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg=="; + + Producer producer = pulsarClient.newProducer() + .topic(topic) + .enableBatching(false) + .defaultCryptoKeyReader(rsaPublicKeyData) + .addEncryptionKey("ws-consumer-a") + .create(); + + try { + consumerClient.start(); + ClientUpgradeRequest consumerRequest = new ClientUpgradeRequest(); + Future consumerFuture = consumerClient.connect(consumeSocket, URI.create(consumerUri), consumerRequest); + + assertTrue(consumerFuture.get().isOpen()); + assertEquals(consumeSocket.getReceivedMessagesCount(), 0); + + for (int i = 0; i < messages; i++) { + producer.sendAsync(String.valueOf(i).getBytes(StandardCharsets.UTF_8)); + } + + producer.flush(); + consumeSocket.sendPermits(messages); + Awaitility.await().untilAsserted(() -> + assertEquals(consumeSocket.getReceivedMessagesCount(), messages)); + + for (JsonObject msg : consumeSocket.messages) { + assertTrue(msg.has("encryptionContext")); + JsonObject encryptionCtx = msg.getAsJsonObject("encryptionContext"); + JsonObject keys = encryptionCtx.getAsJsonObject("keys"); + 
assertTrue(keys.has("ws-consumer-a")); + + assertTrue(keys.getAsJsonObject("ws-consumer-a").has("keyValue")); + } + + // The message should not be acked since we only acked 1 message of the batch message + Awaitility.await().untilAsserted(() -> + assertEquals(admin.topics().getStats(topic).getSubscriptions() + .get(subscription).getMsgBacklog(), 0)); + + } finally { + stopWebSocketClient(consumerClient); + } + } + private void verifyTopicStat(Client client, String baseUrl, String topic) { String statUrl = baseUrl + topic + "/stats"; WebTarget webTarget = client.target(statUrl); @@ -946,13 +1046,13 @@ private void verifyProxyStats(Client client, String baseUrl, String topic) { // number of consumers are connected = 2 (one is reader) assertEquals(stats.consumerStats.size(), 2); ConsumerStats consumerStats = stats.consumerStats.iterator().next(); - // Assert.assertTrue(consumerStats.numberOfMsgDelivered > 0); + assertTrue(consumerStats.numberOfMsgDelivered > 0); assertNotNull(consumerStats.remoteConnection); // number of producers are connected = 1 assertEquals(stats.producerStats.size(), 1); ProducerStats producerStats = stats.producerStats.iterator().next(); - // Assert.assertTrue(producerStats.numberOfMsgPublished > 0); + assertTrue(producerStats.numberOfMsgPublished > 0); assertNotNull(producerStats.remoteConnection); } diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeTlsTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeTlsTest.java index 4c82615aa7fc9..cdc2eb58d9a09 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeTlsTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeTlsTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.websocket.proxy; import static java.util.concurrent.Executors.newFixedThreadPool; +import static 
org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doReturn; @@ -76,7 +77,7 @@ public void setup() throws Exception { config.setBrokerClientAuthenticationParameters("tlsCertFile:" + TLS_CLIENT_CERT_FILE_PATH + ",tlsKeyFile:" + TLS_CLIENT_KEY_FILE_PATH); config.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); config.setConfigurationStoreServers(GLOBAL_DUMMY_VALUE); - service = spy(new WebSocketService(config)); + service = spyWithClassAndConstructorArgs(WebSocketService.class, config); doReturn(new ZKMetadataStore(mockZooKeeperGlobal)).when(service).createMetadataStore(anyString(), anyInt()); proxyServer = new ProxyServer(config); WebSocketServiceStarter.start(proxyServer, service); @@ -106,7 +107,7 @@ public void socketTest() throws GeneralSecurityException { SslContextFactory sslContextFactory = new SslContextFactory(); sslContextFactory.setSslContext(SecurityUtility - .createSslContext(false, SecurityUtility.loadCertificatesFromPemFile(TLS_TRUST_CERT_FILE_PATH))); + .createSslContext(false, SecurityUtility.loadCertificatesFromPemFile(TLS_TRUST_CERT_FILE_PATH), null)); WebSocketClient consumeClient = new WebSocketClient(sslContextFactory); SimpleConsumerSocket consumeSocket = new SimpleConsumerSocket(); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeWithoutZKTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeWithoutZKTest.java index 485f23bdeb165..5baaacd52d9c2 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeWithoutZKTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/ProxyPublishConsumeWithoutZKTest.java @@ -19,6 +19,7 @@ package org.apache.pulsar.websocket.proxy; import static 
java.util.concurrent.Executors.newFixedThreadPool; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doReturn; @@ -63,7 +64,7 @@ public void setup() throws Exception { config.setClusterName("test"); config.setServiceUrl(pulsar.getSafeWebServiceAddress()); config.setServiceUrlTls(pulsar.getWebServiceAddressTls()); - service = spy(new WebSocketService(config)); + service = spyWithClassAndConstructorArgs(WebSocketService.class, config); doReturn(new ZKMetadataStore(mockZooKeeper)).when(service).createMetadataStore(anyString(), anyInt()); proxyServer = new ProxyServer(config); WebSocketServiceStarter.start(proxyServer, service); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/SimpleConsumerSocket.java b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/SimpleConsumerSocket.java index 749bfdcd2ba25..b1a9908d7234e 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/SimpleConsumerSocket.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/SimpleConsumerSocket.java @@ -44,6 +44,7 @@ public class SimpleConsumerSocket { private final CountDownLatch closeLatch; private Session session; private final ArrayList consumerBuffer; + final ArrayList messages; private final AtomicInteger receivedMessages = new AtomicInteger(); // Custom message handler to override standard message processing, if it's needed private SimpleConsumerMessageHandler customMessageHandler; @@ -51,6 +52,7 @@ public class SimpleConsumerSocket { public SimpleConsumerSocket() { this.closeLatch = new CountDownLatch(1); consumerBuffer = new ArrayList<>(); + this.messages = new ArrayList<>(); } public boolean awaitClose(int duration, TimeUnit unit) throws InterruptedException { @@ -79,6 +81,7 @@ public void onConnect(Session session) throws InterruptedException 
{ public synchronized void onMessage(String msg) throws JsonParseException, IOException { receivedMessages.incrementAndGet(); JsonObject message = new Gson().fromJson(msg, JsonObject.class); + this.messages.add(message); if (message.get(X_PULSAR_MESSAGE_ID) != null) { String messageId = message.get(X_PULSAR_MESSAGE_ID).getAsString(); consumerBuffer.add(messageId); diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/v1/V1_ProxyAuthenticationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/v1/V1_ProxyAuthenticationTest.java index d315a10c46a28..03227e9587d13 100644 --- a/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/v1/V1_ProxyAuthenticationTest.java +++ b/pulsar-broker/src/test/java/org/apache/pulsar/websocket/proxy/v1/V1_ProxyAuthenticationTest.java @@ -19,11 +19,10 @@ package org.apache.pulsar.websocket.proxy.v1; import static java.util.concurrent.Executors.newFixedThreadPool; +import static org.apache.pulsar.broker.BrokerTestUtil.spyWithClassAndConstructorArgs; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.spy; - import com.google.common.collect.Sets; import java.net.URI; @@ -88,7 +87,7 @@ public void setup() throws Exception { config.setAnonymousUserRole("anonymousUser"); } - service = spy(new WebSocketService(config)); + service = spyWithClassAndConstructorArgs(WebSocketService.class, config); doReturn(new ZKMetadataStore(mockZooKeeperGlobal)).when(service).createMetadataStore(anyString(), anyInt()); proxyServer = new ProxyServer(config); WebSocketServiceStarter.start(proxyServer, service); diff --git a/pulsar-client-1x-base/pom.xml b/pulsar-client-1x-base/pom.xml index d7e6217fd63c0..70e75d446dae0 100644 --- a/pulsar-client-1x-base/pom.xml +++ b/pulsar-client-1x-base/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
diff --git a/pulsar-client-1x-base/pulsar-client-1x/pom.xml b/pulsar-client-1x-base/pulsar-client-1x/pom.xml index aa61fb15d152f..6ee0ac15be55d 100644 --- a/pulsar-client-1x-base/pulsar-client-1x/pom.xml +++ b/pulsar-client-1x-base/pulsar-client-1x/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar-client-1x-base - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/pulsar-client-1x-base/pulsar-client-2x-shaded/pom.xml b/pulsar-client-1x-base/pulsar-client-2x-shaded/pom.xml index 1fa512ecc1b4a..61b59f0bea143 100644 --- a/pulsar-client-1x-base/pulsar-client-2x-shaded/pom.xml +++ b/pulsar-client-1x-base/pulsar-client-2x-shaded/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar-client-1x-base - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/pulsar-client-admin-api/pom.xml b/pulsar-client-admin-api/pom.xml index b9f68f1b6e2a0..2dcf6d9cc6578 100644 --- a/pulsar-client-admin-api/pom.xml +++ b/pulsar-client-admin-api/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Namespaces.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Namespaces.java index f6f8654f2d4a7..fb1b67e7f1d9b 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Namespaces.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/Namespaces.java @@ -687,6 +687,19 @@ public interface Namespaces { */ CompletableFuture revokePermissionsOnNamespaceAsync(String namespace, String role); + /** + * Get permission to role to access subscription's admin-api. + * @param namespace + * @throws PulsarAdminException + */ + Map> getPermissionOnSubscription(String namespace) throws PulsarAdminException; + + /** + * Get permission to role to access subscription's admin-api asynchronously. + * @param namespace + */ + CompletableFuture>> getPermissionOnSubscriptionAsync(String namespace); + /** * Grant permission to role to access subscription's admin-api. 
* @param namespace @@ -1237,6 +1250,23 @@ void setAutoTopicCreation(String namespace, AutoTopicCreationOverride autoTopicC CompletableFuture setAutoTopicCreationAsync( String namespace, AutoTopicCreationOverride autoTopicCreationOverride); + /** + * Get the autoTopicCreation info within a namespace. + * + * @param namespace + * @return + * @throws PulsarAdminException + */ + AutoTopicCreationOverride getAutoTopicCreation(String namespace) throws PulsarAdminException; + + /** + * Get the autoTopicCreation info within a namespace asynchronously. + * + * @param namespace + * @return + */ + CompletableFuture getAutoTopicCreationAsync(String namespace); + /** * Removes the autoTopicCreation policy for a given namespace. *

@@ -1322,6 +1352,23 @@ void setAutoSubscriptionCreation( CompletableFuture setAutoSubscriptionCreationAsync( String namespace, AutoSubscriptionCreationOverride autoSubscriptionCreationOverride); + /** + * Get the autoSubscriptionCreation info within a namespace. + * + * @param namespace + * @return + * @throws PulsarAdminException + */ + AutoSubscriptionCreationOverride getAutoSubscriptionCreation(String namespace) throws PulsarAdminException; + + /** + * Get the autoSubscriptionCreation info within a namespace asynchronously. + * + * @param namespace + * @return + */ + CompletableFuture getAutoSubscriptionCreationAsync(String namespace); + /** * Sets the subscriptionTypesEnabled policy for a given namespace, overriding broker settings. * @@ -2438,6 +2485,23 @@ CompletableFuture clearNamespaceBundleBacklogForSubscriptionAsync(String n */ void setEncryptionRequiredStatus(String namespace, boolean encryptionRequired) throws PulsarAdminException; + /** + * Get the encryption required status within a namespace. + * + * @param namespace + * @return + * @throws PulsarAdminException + */ + Boolean getEncryptionRequiredStatus(String namespace) throws PulsarAdminException; + + /** + * Get the encryption required status within a namespace asynchronously. + * + * @param namespace + * @return + */ + CompletableFuture getEncryptionRequiredStatusAsync(String namespace); + /** * Set the encryption required status for all topics within a namespace asynchronously. *

@@ -2646,6 +2710,23 @@ void setSubscriptionAuthMode(String namespace, SubscriptionAuthMode subscription */ CompletableFuture setSubscriptionAuthModeAsync(String namespace, SubscriptionAuthMode subscriptionAuthMode); + /** + * Get the subscriptionAuthMode within a namespace. + * + * @param namespace + * @return + * @throws PulsarAdminException + */ + SubscriptionAuthMode getSubscriptionAuthMode(String namespace) throws PulsarAdminException; + + /** + * Get the subscriptionAuthMode within a namespace asynchronously. + * + * @param namespace + * @return + */ + CompletableFuture getSubscriptionAuthModeAsync(String namespace); + /** * Get the deduplicationSnapshotInterval for a namespace. * @@ -3539,6 +3620,7 @@ void setSchemaAutoUpdateCompatibilityStrategy(String namespace, /** * Get schema validation enforced for namespace. + * @param namespace namespace for this command. * @return the schema validation enforced flag * @throws NotAuthorizedException * Don't have admin permission @@ -3547,16 +3629,39 @@ void setSchemaAutoUpdateCompatibilityStrategy(String namespace, * @throws PulsarAdminException * Unexpected error */ - boolean getSchemaValidationEnforced(String namespace) - throws PulsarAdminException; + boolean getSchemaValidationEnforced(String namespace) throws PulsarAdminException; /** * Get schema validation enforced for namespace asynchronously. + * @param namespace namespace for this command. * * @return the schema validation enforced flag */ CompletableFuture getSchemaValidationEnforcedAsync(String namespace); + /** + * Get schema validation enforced for namespace. + * @param namespace namespace for this command. + * @param applied applied for this command. 
+ * @return the schema validation enforced flag + * @throws NotAuthorizedException + * Don't have admin permission + * @throws NotFoundException + * Tenant or Namespace does not exist + * @throws PulsarAdminException + * Unexpected error + */ + boolean getSchemaValidationEnforced(String namespace, boolean applied) throws PulsarAdminException; + + /** + * Get schema validation enforced for namespace asynchronously. + * @param namespace namespace for this command. + * @param applied applied for this command. + * + * @return the schema validation enforced flag + */ + CompletableFuture getSchemaValidationEnforcedAsync(String namespace, boolean applied); + /** * Set schema validation enforced for namespace. * if a producer without a schema attempts to produce to a topic with schema in this the namespace, the diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/PulsarAdminBuilder.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/PulsarAdminBuilder.java index 9f8b4be140908..c685c1f77936d 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/PulsarAdminBuilder.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/PulsarAdminBuilder.java @@ -36,6 +36,29 @@ public interface PulsarAdminBuilder { */ PulsarAdmin build() throws PulsarClientException; + /** + * Load the configuration from provided config map. + * + *

Example: + * + *

+     * {@code
+     * Map config = new HashMap<>();
+     * config.put("serviceHttpUrl", "http://localhost:6650");
+     *
+     * PulsarAdminBuilder builder = ...;
+     * builder = builder.loadConf(config);
+     *
+     * PulsarAdmin client = builder.build();
+     * }
+     * 
+ * + * @param config + * configuration to load + * @return the client builder instance + */ + PulsarAdminBuilder loadConf(Map config); + /** * Create a copy of the current client builder. *

diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/BacklogQuota.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/BacklogQuota.java index d4b5c4bba1c5b..4604710c3a68c 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/BacklogQuota.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/BacklogQuota.java @@ -28,6 +28,15 @@ */ public interface BacklogQuota { + /** + * Gets quota limit in size. + * Remains for compatible + * + * @return quota limit in bytes + */ + @Deprecated + long getLimit(); + /** * Gets quota limit in size. * diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/ConsumerStats.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/ConsumerStats.java index 7204af616e2d8..cc421d276c150 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/ConsumerStats.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/ConsumerStats.java @@ -40,6 +40,11 @@ public interface ConsumerStats { /** Total rate of messages redelivered by this consumer (msg/s). */ double getMsgRateRedeliver(); + /** + * Total rate of message ack(msg/s). + */ + double getMessageAckRate(); + /** Total chunked messages dispatched. 
*/ double getChunkedMessageRate(); diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/Policies.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/Policies.java index 631675fcbf729..dca72da69a8f2 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/Policies.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/Policies.java @@ -101,14 +101,13 @@ public class Policies { @SuppressWarnings("checkstyle:MemberName") @Deprecated - public SchemaAutoUpdateCompatibilityStrategy schema_auto_update_compatibility_strategy = - SchemaAutoUpdateCompatibilityStrategy.Full; + public SchemaAutoUpdateCompatibilityStrategy schema_auto_update_compatibility_strategy = null; @SuppressWarnings("checkstyle:MemberName") public SchemaCompatibilityStrategy schema_compatibility_strategy = SchemaCompatibilityStrategy.UNDEFINED; @SuppressWarnings("checkstyle:MemberName") - public boolean is_allow_auto_update_schema = true; + public Boolean is_allow_auto_update_schema = null; @SuppressWarnings("checkstyle:MemberName") public boolean schema_validation_enforced = false; diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/SchemaCompatibilityStrategy.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/SchemaCompatibilityStrategy.java index 9a4f74c437b14..f3b4569bad8f3 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/SchemaCompatibilityStrategy.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/SchemaCompatibilityStrategy.java @@ -71,10 +71,13 @@ public enum SchemaCompatibilityStrategy { FULL_TRANSITIVE; + public static boolean isUndefined(SchemaCompatibilityStrategy strategy) { + return strategy == null || strategy == SchemaCompatibilityStrategy.UNDEFINED; + } public static SchemaCompatibilityStrategy 
fromAutoUpdatePolicy(SchemaAutoUpdateCompatibilityStrategy strategy) { if (strategy == null) { - return SchemaCompatibilityStrategy.ALWAYS_INCOMPATIBLE; + return null; } switch (strategy) { case Backward: diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/SubscriptionStats.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/SubscriptionStats.java index 2ce38aafb34d4..1649c6efefe79 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/SubscriptionStats.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/SubscriptionStats.java @@ -43,6 +43,11 @@ public interface SubscriptionStats { /** Chunked message dispatch rate. */ int getChunkedMessageRate(); + /** + * Total rate of message ack(msg/s). + */ + double getMessageAckRate(); + /** Number of messages in the subscription backlog. */ long getMsgBacklog(); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionBufferException.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/ValidateResult.java similarity index 61% rename from pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionBufferException.java rename to pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/ValidateResult.java index 3ecbf2b555d3b..13821947cf217 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionBufferException.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/ValidateResult.java @@ -16,24 +16,25 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.pulsar.broker.transaction.buffer.exceptions; +package org.apache.pulsar.common.policies.data; -/** - * The base exception class for the errors thrown from Transaction Buffer. 
- */ -public abstract class TransactionBufferException extends Exception { +import lombok.Getter; - private static final long serialVersionUID = 0L; +@Getter +public class ValidateResult { + private final boolean success; + private final String errorInfo; - public TransactionBufferException(String message) { - super(message); + private ValidateResult(boolean success, String errorInfo) { + this.success = success; + this.errorInfo = errorInfo; } - public TransactionBufferException(String message, Throwable cause) { - super(message, cause); + public static ValidateResult fail(String errorInfo) { + return new ValidateResult(false, errorInfo); } - public TransactionBufferException(Throwable cause) { - super(cause); + public static ValidateResult success() { + return new ValidateResult(true, null); } } diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/AutoTopicCreationOverrideImpl.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/AutoTopicCreationOverrideImpl.java index 1ce60d1c78b30..ba6bc07780a08 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/AutoTopicCreationOverrideImpl.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/AutoTopicCreationOverrideImpl.java @@ -23,6 +23,7 @@ import lombok.NoArgsConstructor; import org.apache.pulsar.common.policies.data.AutoTopicCreationOverride; import org.apache.pulsar.common.policies.data.TopicType; +import org.apache.pulsar.common.policies.data.ValidateResult; /** * Override of autoTopicCreation settings on a namespace level. 
@@ -35,28 +36,29 @@ public final class AutoTopicCreationOverrideImpl implements AutoTopicCreationOve private String topicType; private Integer defaultNumPartitions; - public static boolean isValidOverride(AutoTopicCreationOverride override) { + public static ValidateResult validateOverride(AutoTopicCreationOverride override) { if (override == null) { - return false; + return ValidateResult.fail("[AutoTopicCreationOverride] can not be null"); } if (override.isAllowAutoTopicCreation()) { if (!TopicType.isValidTopicType(override.getTopicType())) { - return false; + return ValidateResult.fail(String.format("Unknown topic type [%s]", override.getTopicType())); } if (TopicType.PARTITIONED.toString().equals(override.getTopicType())) { if (override.getDefaultNumPartitions() == null) { - return false; + return ValidateResult.fail("[defaultNumPartitions] cannot be null when the type is partitioned."); } - if (!(override.getDefaultNumPartitions() > 0)) { - return false; + if (override.getDefaultNumPartitions() <= 0) { + return ValidateResult.fail("[defaultNumPartitions] cannot be less than 1 for partition type."); } } else if (TopicType.NON_PARTITIONED.toString().equals(override.getTopicType())) { if (override.getDefaultNumPartitions() != null) { - return false; + return ValidateResult.fail("[defaultNumPartitions] is not allowed to be" + + " set when the type is non-partition."); } } } - return true; + return ValidateResult.success(); } public static AutoTopicCreationOverrideImplBuilder builder() { diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BacklogQuotaImpl.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BacklogQuotaImpl.java index 591e8b8c95a8a..c8073d8fb4066 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BacklogQuotaImpl.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BacklogQuotaImpl.java 
@@ -18,23 +18,84 @@ */ package org.apache.pulsar.common.policies.data.impl; -import lombok.AllArgsConstructor; -import lombok.Data; +import lombok.EqualsAndHashCode; import lombok.NoArgsConstructor; +import lombok.ToString; import org.apache.pulsar.common.policies.data.BacklogQuota; -@Data -@AllArgsConstructor +@ToString +@EqualsAndHashCode @NoArgsConstructor public class BacklogQuotaImpl implements BacklogQuota { public static final long BYTES_IN_GIGABYTE = 1024 * 1024 * 1024; - // backlog quota by size in byte - private long limitSize; - // backlog quota by time in second + /** + * backlog quota by size in byte, remains for compatible. + * for the details: https://github.com/apache/pulsar/pull/13291 + * @since 2.9.1 + */ + @Deprecated + private long limit; + + /** + * backlog quota by size in byte. + */ + private Long limitSize; + + /** + * backlog quota by time in second. + */ private int limitTime; private RetentionPolicy policy; + public BacklogQuotaImpl(long limitSize, int limitTime, RetentionPolicy policy) { + this.limitSize = limitSize; + this.limitTime = limitTime; + this.policy = policy; + } + + @Deprecated + public long getLimit() { + if (limitSize == null) { + return limit; + } + return limitSize; + } + + @Deprecated + public void setLimit(long limit) { + this.limit = limit; + this.limitSize = limit; + } + + public long getLimitSize() { + if (limitSize == null) { + return limit; + } + return limitSize; + } + + public void setLimitSize(long limitSize) { + this.limitSize = limitSize; + this.limit = limitSize; + } + + public int getLimitTime() { + return limitTime; + } + + public void setLimitTime(int limitTime) { + this.limitTime = limitTime; + } + + public RetentionPolicy getPolicy() { + return policy; + } + + public void setPolicy(RetentionPolicy policy) { + this.policy = policy; + } + public static BacklogQuotaImplBuilder builder() { return new BacklogQuotaImplBuilder(); } diff --git 
a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BookieInfoImpl.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BookieInfoImpl.java index de316e612f3c1..b58498903ab48 100644 --- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BookieInfoImpl.java +++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/common/policies/data/impl/BookieInfoImpl.java @@ -21,6 +21,7 @@ import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; +import lombok.NonNull; import org.apache.pulsar.common.policies.data.BookieInfo; /** @@ -40,6 +41,7 @@ public static BookieInfoImplBuilder builder() { public static class BookieInfoImplBuilder implements BookieInfo.Builder { private String rack; private String hostname; + private static final String PATH_SEPARATOR = "/"; public BookieInfoImplBuilder rack(String rack) { this.rack = rack; @@ -52,7 +54,15 @@ public BookieInfoImplBuilder hostname(String hostname) { } public BookieInfoImpl build() { + checkArgument(rack != null && !rack.isEmpty() && !rack.equals(PATH_SEPARATOR), + "rack name is invalid, it should not be null, empty or '/'"); return new BookieInfoImpl(rack, hostname); } + + public static void checkArgument(boolean expression, @NonNull Object errorMessage) { + if (!expression) { + throw new IllegalArgumentException(String.valueOf(errorMessage)); + } + } } } diff --git a/pulsar-client-admin-shaded/pom.xml b/pulsar-client-admin-shaded/pom.xml index 406f4b28d0d84..f2b96114531d4 100644 --- a/pulsar-client-admin-shaded/pom.xml +++ b/pulsar-client-admin-shaded/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/pulsar-client-admin/pom.xml b/pulsar-client-admin/pom.xml index 2d28b12e5cbde..697110084f18e 100644 --- a/pulsar-client-admin/pom.xml +++ b/pulsar-client-admin/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/BaseResource.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/BaseResource.java index 6838fd8b10bbe..e4ea751672a92 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/BaseResource.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/BaseResource.java @@ -216,7 +216,7 @@ public PulsarAdminException getApiException(Throwable e) { ServerErrorException see = (ServerErrorException) e; int statusCode = see.getResponse().getStatus(); String httpError = getReasonFromServer(see); - return new ServerSideErrorException(see, e.getMessage(), httpError, statusCode); + return new ServerSideErrorException(see, httpError, httpError, statusCode); } else if (e instanceof ClientErrorException) { // Handle 4xx exceptions ClientErrorException cee = (ClientErrorException) e; diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/NamespacesImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/NamespacesImpl.java index 8307a5f172583..04da2f0f411f5 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/NamespacesImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/NamespacesImpl.java @@ -513,6 +513,39 @@ public CompletableFuture revokePermissionsOnNamespaceAsync(String namespac return asyncDeleteRequest(path); } + @Override + public Map> getPermissionOnSubscription(String namespace) throws PulsarAdminException { + try { + return getPermissionOnSubscriptionAsync(namespace).get(this.readTimeoutMs, TimeUnit.MILLISECONDS); + } catch (ExecutionException e) { + throw (PulsarAdminException) e.getCause(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new PulsarAdminException(e); + } catch (TimeoutException e) { + throw new 
PulsarAdminException.TimeoutException(e); + } + } + + @Override + public CompletableFuture>> getPermissionOnSubscriptionAsync(String namespace) { + NamespaceName ns = NamespaceName.get(namespace); + WebTarget path = namespacePath(ns, "permissions", "subscription"); + final CompletableFuture>> future = new CompletableFuture<>(); + asyncGetRequest(path, + new InvocationCallback>>() { + @Override + public void completed(Map> permissions) { + future.complete(permissions); + } + + @Override + public void failed(Throwable throwable) { + future.completeExceptionally(getApiException(throwable.getCause())); + } + }); + return future; + } @Override public void grantPermissionOnSubscription(String namespace, String subscription, Set roles) @@ -995,6 +1028,40 @@ public CompletableFuture setAutoTopicCreationAsync( return asyncPostRequest(path, Entity.entity(autoTopicCreationOverride, MediaType.APPLICATION_JSON)); } + @Override + public AutoTopicCreationOverride getAutoTopicCreation(String namespace) throws PulsarAdminException { + try { + return getAutoTopicCreationAsync(namespace).get(this.readTimeoutMs, TimeUnit.MILLISECONDS); + } catch (ExecutionException e) { + throw (PulsarAdminException) e.getCause(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new PulsarAdminException(e); + } catch (TimeoutException e) { + throw new PulsarAdminException.TimeoutException(e); + } + } + + @Override + public CompletableFuture getAutoTopicCreationAsync(String namespace) { + NamespaceName ns = NamespaceName.get(namespace); + WebTarget path = namespacePath(ns, "autoTopicCreation"); + final CompletableFuture future = new CompletableFuture<>(); + asyncGetRequest(path, + new InvocationCallback() { + @Override + public void completed(AutoTopicCreationOverride autoTopicCreationOverride) { + future.complete(autoTopicCreationOverride); + } + + @Override + public void failed(Throwable throwable) { + 
future.completeExceptionally(getApiException(throwable.getCause())); + } + }); + return future; + } + @Override public void removeAutoTopicCreation(String namespace) throws PulsarAdminException { try { @@ -1040,6 +1107,41 @@ public CompletableFuture setAutoSubscriptionCreationAsync(String namespace return asyncPostRequest(path, Entity.entity(autoSubscriptionCreationOverride, MediaType.APPLICATION_JSON)); } + @Override + public AutoSubscriptionCreationOverride getAutoSubscriptionCreation(String namespace) throws PulsarAdminException { + try { + return getAutoSubscriptionCreationAsync(namespace).get(this.readTimeoutMs, TimeUnit.MILLISECONDS); + } catch (ExecutionException e) { + throw (PulsarAdminException) e.getCause(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new PulsarAdminException(e); + } catch (TimeoutException e) { + throw new PulsarAdminException.TimeoutException(e); + } + } + + @Override + public CompletableFuture getAutoSubscriptionCreationAsync(String namespace) { + NamespaceName ns = NamespaceName.get(namespace); + WebTarget path = namespacePath(ns, "autoSubscriptionCreation"); + final CompletableFuture future = new CompletableFuture<>(); + asyncGetRequest(path, + new InvocationCallback() { + @Override + public void completed(AutoSubscriptionCreationOverride autoSubscriptionCreation) { + future.complete(autoSubscriptionCreation); + } + + @Override + public void failed(Throwable throwable) { + future.completeExceptionally(getApiException(throwable.getCause())); + } + }); + return future; + } + + @Override public void setSubscriptionTypesEnabled( String namespace, Set subscriptionTypesEnabled) throws PulsarAdminException { @@ -2105,6 +2207,40 @@ public CompletableFuture setSubscriptionAuthModeAsync( return asyncPostRequest(path, Entity.entity(subscriptionAuthMode, MediaType.APPLICATION_JSON)); } + @Override + public SubscriptionAuthMode getSubscriptionAuthMode(String namespace) throws PulsarAdminException { + try { 
+ return getSubscriptionAuthModeAsync(namespace).get(this.readTimeoutMs, TimeUnit.MILLISECONDS); + } catch (ExecutionException e) { + throw (PulsarAdminException) e.getCause(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new PulsarAdminException(e); + } catch (TimeoutException e) { + throw new PulsarAdminException.TimeoutException(e); + } + } + + @Override + public CompletableFuture getSubscriptionAuthModeAsync(String namespace) { + NamespaceName ns = NamespaceName.get(namespace); + WebTarget path = namespacePath(ns, "subscriptionAuthMode"); + final CompletableFuture future = new CompletableFuture<>(); + asyncGetRequest(path, + new InvocationCallback() { + @Override + public void completed(SubscriptionAuthMode subscriptionAuthMode) { + future.complete(subscriptionAuthMode); + } + + @Override + public void failed(Throwable throwable) { + future.completeExceptionally(getApiException(throwable.getCause())); + } + }); + return future; + } + @Override public void setEncryptionRequiredStatus(String namespace, boolean encryptionRequired) throws PulsarAdminException { try { @@ -2127,6 +2263,40 @@ public CompletableFuture setEncryptionRequiredStatusAsync(String namespace return asyncPostRequest(path, Entity.entity(encryptionRequired, MediaType.APPLICATION_JSON)); } + @Override + public Boolean getEncryptionRequiredStatus(String namespace) throws PulsarAdminException { + try { + return getEncryptionRequiredStatusAsync(namespace).get(this.readTimeoutMs, TimeUnit.MILLISECONDS); + } catch (ExecutionException e) { + throw (PulsarAdminException) e.getCause(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new PulsarAdminException(e); + } catch (TimeoutException e) { + throw new PulsarAdminException.TimeoutException(e); + } + } + + @Override + public CompletableFuture getEncryptionRequiredStatusAsync(String namespace) { + NamespaceName ns = NamespaceName.get(namespace); + WebTarget path = namespacePath(ns, 
"encryptionRequired"); + final CompletableFuture future = new CompletableFuture<>(); + asyncGetRequest(path, + new InvocationCallback() { + @Override + public void completed(Boolean enabled) { + future.complete(enabled); + } + + @Override + public void failed(Throwable throwable) { + future.completeExceptionally(getApiException(throwable.getCause())); + } + }); + return future; + } + @Override public DelayedDeliveryPolicies getDelayedDelivery(String namespace) throws PulsarAdminException { try { @@ -3060,11 +3230,21 @@ public void setSchemaAutoUpdateCompatibilityStrategy(String namespace, } @Override - public boolean getSchemaValidationEnforced(String namespace) + public boolean getSchemaValidationEnforced(String namespace) throws PulsarAdminException { + return getSchemaValidationEnforced(namespace, false); + } + + @Override + public CompletableFuture getSchemaValidationEnforcedAsync(String namespace) { + return getSchemaValidationEnforcedAsync(namespace, false); + } + + @Override + public boolean getSchemaValidationEnforced(String namespace, boolean applied) throws PulsarAdminException { try { - return getSchemaValidationEnforcedAsync(namespace). 
- get(this.readTimeoutMs, TimeUnit.MILLISECONDS); + return getSchemaValidationEnforcedAsync(namespace, applied) + .get(this.readTimeoutMs, TimeUnit.MILLISECONDS); } catch (ExecutionException e) { throw (PulsarAdminException) e.getCause(); } catch (InterruptedException e) { @@ -3076,9 +3256,10 @@ public boolean getSchemaValidationEnforced(String namespace) } @Override - public CompletableFuture getSchemaValidationEnforcedAsync(String namespace) { + public CompletableFuture getSchemaValidationEnforcedAsync(String namespace, boolean applied) { NamespaceName ns = NamespaceName.get(namespace); WebTarget path = namespacePath(ns, "schemaValidationEnforced"); + path = path.queryParam("applied", applied); final CompletableFuture future = new CompletableFuture<>(); asyncGetRequest(path, new InvocationCallback() { diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PackagesImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PackagesImpl.java index 4c7fc4cf29905..779c725daed4a 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PackagesImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PackagesImpl.java @@ -18,10 +18,13 @@ */ package org.apache.pulsar.client.admin.internal; +import static org.asynchttpclient.Dsl.get; import com.google.gson.Gson; +import io.netty.handler.codec.http.HttpHeaders; import java.io.File; +import java.io.FileOutputStream; import java.io.IOException; -import java.io.InputStream; +import java.nio.channels.FileChannel; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; @@ -41,8 +44,11 @@ import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.packages.management.core.common.PackageMetadata; import org.apache.pulsar.packages.management.core.common.PackageName; +import org.asynchttpclient.AsyncHandler; import org.asynchttpclient.AsyncHttpClient; import 
org.asynchttpclient.Dsl; +import org.asynchttpclient.HttpResponseBodyPart; +import org.asynchttpclient.HttpResponseStatus; import org.asynchttpclient.RequestBuilder; import org.asynchttpclient.request.body.multipart.FilePart; import org.asynchttpclient.request.body.multipart.StringPart; @@ -173,30 +179,76 @@ public void download(String packageName, String path) throws PulsarAdminExceptio public CompletableFuture downloadAsync(String packageName, String path) { WebTarget webTarget = packages.path(PackageName.get(packageName).toRestPath()); final CompletableFuture future = new CompletableFuture<>(); - asyncGetRequest(webTarget, new InvocationCallback(){ - @Override - public void completed(Response response) { - if (response.getStatus() == Response.Status.OK.getStatusCode()) { - try (InputStream inputStream = response.readEntity(InputStream.class)) { - Path destinyPath = Paths.get(path); - if (destinyPath.getParent() != null) { - Files.createDirectories(destinyPath.getParent()); + try { + Path destinyPath = Paths.get(path); + if (destinyPath.getParent() != null) { + Files.createDirectories(destinyPath.getParent()); + } + + FileChannel os = new FileOutputStream(destinyPath.toFile()).getChannel(); + RequestBuilder builder = get(webTarget.getUri().toASCIIString()); + + CompletableFuture statusFuture = + httpClient.executeRequest(addAuthHeaders(webTarget, builder).build(), + new AsyncHandler() { + private HttpResponseStatus status; + + @Override + public State onStatusReceived(HttpResponseStatus httpResponseStatus) throws Exception { + status = httpResponseStatus; + if (status.getStatusCode() != Response.Status.OK.getStatusCode()) { + return State.ABORT; + } + return State.CONTINUE; } - Files.copy(inputStream, destinyPath); - future.complete(null); + + @Override + public State onHeadersReceived(HttpHeaders httpHeaders) throws Exception { + return State.CONTINUE; + } + + @Override + public State onBodyPartReceived(HttpResponseBodyPart httpResponseBodyPart) throws Exception 
{ + os.write(httpResponseBodyPart.getBodyByteBuffer()); + return State.CONTINUE; + } + + @Override + public void onThrowable(Throwable throwable) { + // we don't need to handle that throwable and use the returned future to handle it. + } + + @Override + public HttpResponseStatus onCompleted() throws Exception { + return status; + } + }).toCompletableFuture(); + statusFuture + .whenComplete((status, throwable) -> { + try { + os.close(); } catch (IOException e) { - future.completeExceptionally(e); + future.completeExceptionally(getApiException(throwable)); } - } else { - future.completeExceptionally(getApiException(response)); - } - } - - @Override - public void failed(Throwable throwable) { - future.completeExceptionally(throwable); - } - }); + }) + .thenAccept(status -> { + if (status.getStatusCode() < 200 || status.getStatusCode() >= 300) { + future.completeExceptionally( + getApiException(Response + .status(status.getStatusCode()) + .entity(status.getStatusText()) + .build())); + } else { + future.complete(null); + } + }) + .exceptionally(throwable -> { + future.completeExceptionally(getApiException(throwable)); + return null; + }); + } catch (Exception e) { + future.completeExceptionally(getApiException(e)); + } return future; } diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImpl.java index 70463b7fb4e9a..d86b9e73457ca 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImpl.java @@ -28,10 +28,11 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.PulsarClientException.UnsupportedAuthenticationException; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import 
org.apache.pulsar.client.impl.conf.ConfigurationDataUtils; public class PulsarAdminBuilderImpl implements PulsarAdminBuilder { - protected final ClientConfigurationData conf; + protected ClientConfigurationData conf; private int connectTimeout = PulsarAdminImpl.DEFAULT_CONNECT_TIMEOUT_SECONDS; private int readTimeout = PulsarAdminImpl.DEFAULT_READ_TIMEOUT_SECONDS; private int requestTimeout = PulsarAdminImpl.DEFAULT_REQUEST_TIMEOUT_SECONDS; @@ -62,6 +63,12 @@ public PulsarAdminBuilder clone() { return new PulsarAdminBuilderImpl(conf.clone()); } + @Override + public PulsarAdminBuilder loadConf(Map config) { + conf = ConfigurationDataUtils.loadData(config, conf, ClientConfigurationData.class); + return this; + } + @Override public PulsarAdminBuilder serviceHttpUrl(String serviceHttpUrl) { conf.setServiceUrl(serviceHttpUrl); diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminImpl.java index 80cd978814644..427ab6d1aff6d 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/PulsarAdminImpl.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.client.admin.internal; +import static com.google.common.base.Preconditions.checkArgument; import java.io.IOException; import java.net.URL; import java.util.Map; @@ -148,6 +149,8 @@ public PulsarAdminImpl(String serviceUrl, int autoCertRefreshTime, TimeUnit autoCertRefreshTimeUnit, ClassLoader clientBuilderClassLoader) throws PulsarClientException { + checkArgument(StringUtils.isNotBlank(serviceUrl), "Service URL needs to be specified"); + this.connectTimeout = connectTimeout; this.connectTimeoutUnit = connectTimeoutUnit; this.readTimeout = readTimeout; diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/SchemasImpl.java 
b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/SchemasImpl.java index a072acd8b73a4..ac485ef425e55 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/SchemasImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/SchemasImpl.java @@ -32,7 +32,6 @@ import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.admin.Schemas; import org.apache.pulsar.client.api.Authentication; -import org.apache.pulsar.client.impl.schema.SchemaInfoImpl; import org.apache.pulsar.client.internal.DefaultImplementation; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.protocol.schema.DeleteSchemaResponse; @@ -449,9 +448,10 @@ static SchemaInfo convertGetSchemaResponseToSchemaInfo(TopicName tn, schema = response.getData().getBytes(UTF_8); } - return SchemaInfoImpl.builder() + return SchemaInfo.builder() .schema(schema) .type(response.getType()) + .timestamp(response.getTimestamp()) .properties(response.getProperties()) .name(tn.getLocalName()) .build(); diff --git a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java index caf32e4aacf8a..57f36995a8fdf 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/TopicsImpl.java @@ -1602,7 +1602,7 @@ private List> getMessagesFromHttpResponse(String topic, Response } else { brokerEntryMetadata = new BrokerEntryMetadata(); if (brokerEntryTimestamp != null) { - brokerEntryMetadata.setBrokerTimestamp(DateFormatter.parse(brokerEntryTimestamp.toString())); + brokerEntryMetadata.setBrokerTimestamp(DateFormatter.parse(brokerEntryTimestamp)); } if (brokerEntryIndex != null) { diff --git 
a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/http/AsyncHttpConnector.java b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/http/AsyncHttpConnector.java index 3e17a38da2b34..b8e256268ea78 100644 --- a/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/http/AsyncHttpConnector.java +++ b/pulsar-client-admin/src/main/java/org/apache/pulsar/client/admin/internal/http/AsyncHttpConnector.java @@ -21,6 +21,7 @@ import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslProvider; import io.netty.util.concurrent.DefaultThreadFactory; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -50,6 +51,7 @@ import org.apache.pulsar.client.api.KeyStoreParams; import org.apache.pulsar.client.impl.PulsarServiceNameResolver; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import org.apache.pulsar.client.util.WithSNISslEngineFactory; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.SecurityUtility; import org.apache.pulsar.common.util.keystoretls.KeyStoreSSLContext; @@ -95,6 +97,7 @@ public AsyncHttpConnector(int connectTimeoutMs, int readTimeoutMs, int requestTimeoutMs, int autoCertRefreshTimeSeconds, ClientConfigurationData conf) { DefaultAsyncHttpClientConfig.Builder confBuilder = new DefaultAsyncHttpClientConfig.Builder(); + confBuilder.setUseProxyProperties(true); confBuilder.setFollowRedirect(true); confBuilder.setRequestTimeout(conf.getRequestTimeoutMs()); confBuilder.setConnectTimeout(connectTimeoutMs); @@ -127,7 +130,7 @@ public boolean keepAlive(InetSocketAddress remoteAddress, Request ahcRequest, params != null ? params.getKeyStoreType() : null, params != null ? params.getKeyStorePath() : null, params != null ? 
params.getKeyStorePassword() : null, - conf.isTlsAllowInsecureConnection() || !conf.isTlsHostnameVerificationEnable(), + conf.isTlsAllowInsecureConnection(), conf.getTlsTrustStoreType(), conf.getTlsTrustStorePath(), conf.getTlsTrustStorePassword(), @@ -137,25 +140,41 @@ public boolean keepAlive(InetSocketAddress remoteAddress, Request ahcRequest, JsseSslEngineFactory sslEngineFactory = new JsseSslEngineFactory(sslCtx); confBuilder.setSslEngineFactory(sslEngineFactory); } else { + SslProvider sslProvider = null; + if (conf.getSslProvider() != null) { + sslProvider = SslProvider.valueOf(conf.getSslProvider()); + } SslContext sslCtx = null; if (authData.hasDataForTls()) { sslCtx = authData.getTlsTrustStoreStream() == null ? SecurityUtility.createAutoRefreshSslContextForClient( - conf.isTlsAllowInsecureConnection() || !conf.isTlsHostnameVerificationEnable(), - conf.getTlsTrustCertsFilePath(), authData.getTlsCerificateFilePath(), - authData.getTlsPrivateKeyFilePath(), null, autoCertRefreshTimeSeconds, delayer) + sslProvider, + conf.isTlsAllowInsecureConnection(), + conf.getTlsTrustCertsFilePath(), authData.getTlsCerificateFilePath(), + authData.getTlsPrivateKeyFilePath(), null, autoCertRefreshTimeSeconds, delayer) : SecurityUtility.createNettySslContextForClient( - conf.isTlsAllowInsecureConnection() || !conf.isTlsHostnameVerificationEnable(), - authData.getTlsTrustStoreStream(), authData.getTlsCertificates(), - authData.getTlsPrivateKey()); + sslProvider, + conf.isTlsAllowInsecureConnection(), + authData.getTlsTrustStoreStream(), authData.getTlsCertificates(), + authData.getTlsPrivateKey(), + conf.getTlsCiphers(), + conf.getTlsProtocols()); } else { sslCtx = SecurityUtility.createNettySslContextForClient( - conf.isTlsAllowInsecureConnection() || !conf.isTlsHostnameVerificationEnable(), - conf.getTlsTrustCertsFilePath()); + sslProvider, + conf.isTlsAllowInsecureConnection(), + conf.getTlsTrustCertsFilePath(), + conf.getTlsCiphers(), + conf.getTlsProtocols()); } 
confBuilder.setSslContext(sslCtx); + if (!conf.isTlsHostnameVerificationEnable()) { + confBuilder.setSslEngineFactory(new WithSNISslEngineFactory(serviceNameResolver + .resolveHostUri().getHost())); + } } } + confBuilder.setDisableHttpsEndpointIdentificationAlgorithm(!conf.isTlsHostnameVerificationEnable()); } httpClient = new DefaultAsyncHttpClient(confBuilder.build()); this.readTimeout = Duration.ofMillis(readTimeoutMs); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/NoTxnsCommittedAtLedgerException.java b/pulsar-client-admin/src/test/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImplTest.java similarity index 55% rename from pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/NoTxnsCommittedAtLedgerException.java rename to pulsar-client-admin/src/test/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImplTest.java index b20b29a575e13..1ea45401eec76 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/NoTxnsCommittedAtLedgerException.java +++ b/pulsar-client-admin/src/test/java/org/apache/pulsar/client/admin/internal/PulsarAdminBuilderImplTest.java @@ -16,16 +16,23 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.pulsar.broker.transaction.buffer.exceptions; +package org.apache.pulsar.client.admin.internal; -/** - * Exception is thrown when no transactions found committed at a given ledger. 
- */ -public class NoTxnsCommittedAtLedgerException extends TransactionBufferException { +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.fail; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.api.PulsarClientException; +import org.testng.annotations.Test; - private static final long serialVersionUID = 0L; +public class PulsarAdminBuilderImplTest { - public NoTxnsCommittedAtLedgerException(String message) { - super(message); + @Test + public void testAdminBuilderWithServiceUrlNotSet() throws PulsarClientException { + try{ + PulsarAdmin.builder().build(); + fail(); + } catch (IllegalArgumentException exception) { + assertEquals("Service URL needs to be specified", exception.getMessage()); + } } } diff --git a/pulsar-client-all/pom.xml b/pulsar-client-all/pom.xml index f829386a18ed5..68ee34ce87b8e 100644 --- a/pulsar-client-all/pom.xml +++ b/pulsar-client-all/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/pulsar-client-api/pom.xml b/pulsar-client-api/pom.xml index 949901d2cdbb8..dc541415c2c91 100644 --- a/pulsar-client-api/pom.xml +++ b/pulsar-client-api/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ConsumerBuilder.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ConsumerBuilder.java index 3c3ce177177f3..4b822a0cd1281 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ConsumerBuilder.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ConsumerBuilder.java @@ -677,7 +677,7 @@ public interface ConsumerBuilder extends Cloneable { * the outstanding unchunked-messages by silently acking or asking broker to redeliver later by marking it unacked. * This behavior can be controlled by configuration: @autoAckOldestChunkedMessageOnQueueFull * - * @default 100 + * The default value is 10. 
* * @param maxPendingChuckedMessage * @return @@ -702,7 +702,7 @@ public interface ConsumerBuilder extends Cloneable { * the outstanding unchunked-messages by silently acking or asking broker to redeliver later by marking it unacked. * This behavior can be controlled by configuration: @autoAckOldestChunkedMessageOnQueueFull * - * @default 100 + * The default value is 10. * * @param maxPendingChunkedMessage * @return diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/DeadLetterPolicy.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/DeadLetterPolicy.java index 279a5c508f5a1..91a53c99409a1 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/DeadLetterPolicy.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/DeadLetterPolicy.java @@ -18,8 +18,10 @@ */ package org.apache.pulsar.client.api; +import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Data; +import lombok.NoArgsConstructor; import org.apache.pulsar.common.classification.InterfaceAudience; import org.apache.pulsar.common.classification.InterfaceStability; @@ -30,6 +32,8 @@ */ @Builder @Data +@NoArgsConstructor +@AllArgsConstructor @InterfaceAudience.Public @InterfaceStability.Stable public class DeadLetterPolicy { diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/schema/SchemaDefinition.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/schema/SchemaDefinition.java index dffa5e421b202..c0d184f48f24c 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/schema/SchemaDefinition.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/schema/SchemaDefinition.java @@ -76,6 +76,13 @@ static SchemaDefinitionBuilder builder() { */ Class getPojo(); + /** + * Get pojo classLoader. + * + * @return pojo schema + */ + ClassLoader getClassLoader(); + /** * Get supportSchemaVersioning schema definition. 
* diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/schema/SchemaDefinitionBuilder.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/schema/SchemaDefinitionBuilder.java index 61d246674a8dd..97d822b927d20 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/schema/SchemaDefinitionBuilder.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/schema/SchemaDefinitionBuilder.java @@ -80,6 +80,15 @@ public interface SchemaDefinitionBuilder { */ SchemaDefinitionBuilder withPojo(Class pojo); + /** + * Set schema of pojo classLoader. + * + * @param classLoader pojo classLoader + * + * @return schema definition builder + */ + SchemaDefinitionBuilder withClassLoader(ClassLoader classLoader); + /** * Set schema of json definition. * diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/transaction/TransactionBufferClient.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/transaction/TransactionBufferClient.java index 24c44f15fc48a..d35f8be73ea8c 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/transaction/TransactionBufferClient.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/transaction/TransactionBufferClient.java @@ -91,4 +91,8 @@ CompletableFuture abortTxnOnSubscription(String topic, long lowWaterMark); void close(); + + int getAvailableRequestCredits(); + + int getPendingRequestsCount(); } diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/transaction/TransactionCoordinatorClientException.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/transaction/TransactionCoordinatorClientException.java index 0e1f6c79cf536..d7df4e3c0754b 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/transaction/TransactionCoordinatorClientException.java +++ 
b/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/transaction/TransactionCoordinatorClientException.java @@ -68,6 +68,11 @@ public static class InvalidTxnStatusException extends TransactionCoordinatorClie public InvalidTxnStatusException(String message) { super(message); } + + public InvalidTxnStatusException(String txnId, String actualState, String expectState) { + super("["+ txnId +"] with unexpected state : " + + actualState + ", expect " + expectState + " state!"); + } } /** @@ -93,6 +98,21 @@ public MetaStoreHandlerNotExistsException(String message) { } } + + /** + * Thrown when transaction meta was timeout. + */ + public static class TransactionTimeotException extends TransactionCoordinatorClientException { + + public TransactionTimeotException(Throwable t) { + super(t); + } + + public TransactionTimeotException(String transactionId) { + super("The transaction " + transactionId + " is timeout."); + } + } + /** * Thrown when send request to transaction meta store but the transaction meta store handler not ready. */ diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/internal/PropertiesUtils.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/internal/PropertiesUtils.java new file mode 100644 index 0000000000000..4a418b1d5158e --- /dev/null +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/internal/PropertiesUtils.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.client.internal; + +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +/** + * Internal utility methods for filtering and mapping {@link Properties} objects. + */ +public class PropertiesUtils { + + /** + * Filters the {@link Properties} object so that only properties with the configured prefix are retained, + * and then removes that prefix and puts the key value pairs into the result map. + * @param props - the properties object to filter + * @param prefix - the prefix to filter against and then remove for keys in the resulting map + * @return a map of properties + */ + public static Map filterAndMapProperties(Properties props, String prefix) { + return filterAndMapProperties(props, prefix, ""); + } + + /** + * Filters the {@link Properties} object so that only properties with the configured prefix are retained, + * and then replaces the srcPrefix with the targetPrefix when putting the key value pairs in the resulting map. 
+ * @param props - the properties object to filter + * @param srcPrefix - the prefix to filter against and then remove for keys in the resulting map + * @param targetPrefix - the prefix to add to keys in the result map + * @return a map of properties + */ + public static Map filterAndMapProperties(Properties props, String srcPrefix, String targetPrefix) { + Map result = new HashMap<>(); + int prefixLength = srcPrefix.length(); + props.forEach((keyObject, value) -> { + if (!(keyObject instanceof String)) { + return; + } + String key = (String) keyObject; + if (key.startsWith(srcPrefix) && value != null) { + String truncatedKey = key.substring(prefixLength); + result.put(targetPrefix + truncatedKey, value); + } + }); + return result; + } +} diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/client/internal/PulsarClientImplementationBinding.java b/pulsar-client-api/src/main/java/org/apache/pulsar/client/internal/PulsarClientImplementationBinding.java index f7bcf05230e82..75cd7dc1feec9 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/client/internal/PulsarClientImplementationBinding.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/client/internal/PulsarClientImplementationBinding.java @@ -251,4 +251,7 @@ static byte[] getBytes(ByteBuffer byteBuffer) { byteBuffer.get(array); return array; } + + SchemaInfo newSchemaInfoImpl(String name, byte[] schema, SchemaType type, long timestamp, + Map propertiesValue); } diff --git a/pulsar-client-api/src/main/java/org/apache/pulsar/common/schema/SchemaInfo.java b/pulsar-client-api/src/main/java/org/apache/pulsar/common/schema/SchemaInfo.java index 01ba7465c112b..077730253119a 100644 --- a/pulsar-client-api/src/main/java/org/apache/pulsar/common/schema/SchemaInfo.java +++ b/pulsar-client-api/src/main/java/org/apache/pulsar/common/schema/SchemaInfo.java @@ -18,8 +18,10 @@ */ package org.apache.pulsar.common.schema; +import java.util.Collections; import java.util.Map; +import 
org.apache.pulsar.client.internal.DefaultImplementation; import org.apache.pulsar.common.classification.InterfaceAudience; import org.apache.pulsar.common.classification.InterfaceStability; @@ -47,5 +49,62 @@ public interface SchemaInfo { */ Map getProperties(); + /** + * The created time of schema. + */ + long getTimestamp(); + String getSchemaDefinition(); + + static SchemaInfoBuilder builder() { + return new SchemaInfoBuilder(); + } + + class SchemaInfoBuilder { + private String name; + private byte[] schema; + private SchemaType type; + private Map properties; + private boolean propertiesSet; + private long timestamp; + + SchemaInfoBuilder() { + } + + public SchemaInfoBuilder name(String name) { + this.name = name; + return this; + } + + public SchemaInfoBuilder schema(byte[] schema) { + this.schema = schema; + return this; + } + + public SchemaInfoBuilder type(SchemaType type) { + this.type = type; + return this; + } + + public SchemaInfoBuilder properties(Map properties) { + this.properties = properties; + this.propertiesSet = true; + return this; + } + + public SchemaInfoBuilder timestamp(long timestamp) { + this.timestamp = timestamp; + return this; + } + + public SchemaInfo build() { + Map propertiesValue = this.properties; + if (!this.propertiesSet) { + propertiesValue = Collections.emptyMap(); + } + return DefaultImplementation + .getDefaultImplementation() + .newSchemaInfoImpl(name, schema, type, timestamp, propertiesValue); + } + } } diff --git a/pulsar-client-auth-athenz/pom.xml b/pulsar-client-auth-athenz/pom.xml index 5539f3bc124c2..054a3d2fdbe6a 100644 --- a/pulsar-client-auth-athenz/pom.xml +++ b/pulsar-client-auth-athenz/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
diff --git a/pulsar-client-auth-athenz/src/main/java/org/apache/pulsar/client/impl/auth/AuthenticationAthenz.java b/pulsar-client-auth-athenz/src/main/java/org/apache/pulsar/client/impl/auth/AuthenticationAthenz.java index 1e9588a1df144..447a21ce05645 100644 --- a/pulsar-client-auth-athenz/src/main/java/org/apache/pulsar/client/impl/auth/AuthenticationAthenz.java +++ b/pulsar-client-auth-athenz/src/main/java/org/apache/pulsar/client/impl/auth/AuthenticationAthenz.java @@ -159,7 +159,7 @@ private void setAuthParams(Map authParams) { } this.keyId = authParams.getOrDefault("keyId", "0"); - this.autoPrefetchEnabled = Boolean.valueOf(authParams.getOrDefault("autoPrefetchEnabled", "false")); + this.autoPrefetchEnabled = Boolean.parseBoolean(authParams.getOrDefault("autoPrefetchEnabled", "false")); if (isNotBlank(authParams.get("athenzConfPath"))) { System.setProperty("athenz.athenz_conf", authParams.get("athenzConfPath")); diff --git a/pulsar-client-auth-sasl/pom.xml b/pulsar-client-auth-sasl/pom.xml index 087736b2e11e4..dafbea5b5bc6c 100644 --- a/pulsar-client-auth-sasl/pom.xml +++ b/pulsar-client-auth-sasl/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
diff --git a/pulsar-client-cpp/.gitignore b/pulsar-client-cpp/.gitignore index 0f3d36de7ac20..cc4c9043e1e96 100644 --- a/pulsar-client-cpp/.gitignore +++ b/pulsar-client-cpp/.gitignore @@ -47,6 +47,9 @@ lib*.so* /perf/perfConsumer /system-test/SystemTest +# Files generated from templates by CMAKE +include/pulsar/Version.h + # IDE generated files .csettings .cproject @@ -67,6 +70,7 @@ apidocs/ generated/ # CMAKE +.cmake Makefile cmake_install.cmake CMakeFiles diff --git a/pulsar-client-cpp/CMakeLists.txt b/pulsar-client-cpp/CMakeLists.txt index 7c95791544898..5068356410455 100644 --- a/pulsar-client-cpp/CMakeLists.txt +++ b/pulsar-client-cpp/CMakeLists.txt @@ -18,9 +18,14 @@ # cmake_minimum_required(VERSION 3.4) + project (pulsar-cpp) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake_modules") +execute_process(COMMAND python ${CMAKE_SOURCE_DIR}/../src/gen-pulsar-version-macro.py OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE PVM) +set(PVM_COMMENT "This is generated from Version.h.in by CMAKE. 
DO NOT EDIT DIRECTLY") +configure_file(templates/Version.h.in include/pulsar/Version.h @ONLY) + if (VCPKG_TRIPLET) message(STATUS "Use vcpkg, triplet is ${VCPKG_TRIPLET}") set(CMAKE_PREFIX_PATH "${CMAKE_SOURCE_DIR}/vcpkg_installed/${VCPKG_TRIPLET}") @@ -89,13 +94,13 @@ else() # GCC or Clang are mostly compatible: add_compile_options(-Wall -Wformat-security -Wvla -Werror) # Turn off certain warnings that are too much pain for too little gain: add_compile_options(-Wno-sign-compare -Wno-deprecated-declarations -Wno-error=cpp) - if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64") + if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR APPLE) add_compile_options(-msse4.2 -mpclmul) endif() # Options unique to Clang or GCC: if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Qunused-arguments) - elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9)) + elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.1)) add_compile_options(-Wno-stringop-truncation) endif() endif() @@ -110,13 +115,13 @@ endif(NOT LOG_CATEGORY_NAME) add_definitions(-DLOG_CATEGORY_NAME=${LOG_CATEGORY_NAME} -DBUILDING_PULSAR -DBOOST_ALL_NO_LIB -DBOOST_ALLOW_DEPRECATED_HEADERS) -set(OPENSSL_ROOT_DIR /usr/lib64/) +set(OPENSSL_ROOT_DIR ${OPENSSL_ROOT_DIR} /usr/lib64/) ### This part is to find and keep SSL dynamic libs in RECORD_OPENSSL_SSL_LIBRARY and RECORD_OPENSSL_CRYPTO_LIBRARY -### After find the libs, will unset related cache, and will not affact another same call to find_package. +### After find the libs, will unset related cache, and will not affect another same call to find_package. 
if (APPLE) set(OPENSSL_INCLUDE_DIR /usr/local/opt/openssl/include/ /opt/homebrew/opt/openssl/include) - set(OPENSSL_ROOT_DIR /usr/local/opt/openssl/ /opt/homebrew/opt/openssl) + set(OPENSSL_ROOT_DIR ${OPENSSL_ROOT_DIR} /usr/local/opt/openssl/ /opt/homebrew/opt/openssl) endif () set(OPENSSL_USE_STATIC_LIBS FALSE) @@ -135,11 +140,15 @@ unset(OPENSSL_VERSION CACHE) if (LINK_STATIC) find_library(ZLIB_LIBRARIES REQUIRED NAMES libz.a z zlib) + message(STATUS "ZLIB_LIBRARIES: ${ZLIB_LIBRARIES}") find_library(Protobuf_LIBRARIES NAMES libprotobuf.a libprotobuf) + message(STATUS "Protobuf: ${Protobuf_LIBRARIES}") find_library(CURL_LIBRARIES NAMES libcurl.a curl curl_a libcurl_a) + message(STATUS "CURL_LIBRARIES: ${CURL_LIBRARIES}") find_library(LIB_ZSTD NAMES libzstd.a) + message(STATUS "ZStd: ${LIB_ZSTD}") find_library(LIB_SNAPPY NAMES libsnappy.a) - message(STATUS "Protobuf_LIBRARIES: ${Protobuf_LIBRARIES}") + message(STATUS "LIB_SNAPPY: ${LIB_SNAPPY}") set(COMMON_LIBS ${Protobuf_LIBRARIES} ${COMMON_LIBS}) if (USE_LOG4CXX) @@ -264,7 +273,7 @@ if (BUILD_PYTHON_WRAPPER) list(GET PYTHONLIBS_VERSION_NO_LIST 1 PYTHONLIBS_VERSION_MINOR) set(BOOST_PYTHON_NAME_POSTFIX ${PYTHONLIBS_VERSION_MAJOR}${PYTHONLIBS_VERSION_MINOR}) # For python3 the lib name is boost_python3 - set(BOOST_PYTHON_NAME_LIST python36;python37;python38;python39;python3;python3-mt;python-py${BOOST_PYTHON_NAME_POSTFIX};python${BOOST_PYTHON_NAME_POSTFIX}-mt;python${BOOST_PYTHON_NAME_POSTFIX}) + set(BOOST_PYTHON_NAME_LIST python36;python37;python38;python39;python310;python3;python3-mt;python-py${BOOST_PYTHON_NAME_POSTFIX};python${BOOST_PYTHON_NAME_POSTFIX}-mt;python${BOOST_PYTHON_NAME_POSTFIX}) else () # Regular boost_python set(BOOST_PYTHON_NAME_LIST python;python-mt;python-py27;python27-mt;python27) @@ -282,6 +291,7 @@ if (BUILD_PYTHON_WRAPPER) MESSAGE(FATAL_ERROR "Could not find Boost Python library") endif () + MESSAGE(STATUS "BOOST_PYTHON_NAME_FOUND: " ${BOOST_PYTHON_NAME_FOUND}) find_package(Boost REQUIRED 
COMPONENTS ${BOOST_PYTHON_NAME_FOUND}) endif (BUILD_PYTHON_WRAPPER) @@ -330,6 +340,7 @@ file(MAKE_DIRECTORY ${AUTOGEN_DIR}) include_directories( ${CMAKE_SOURCE_DIR} ${CMAKE_SOURCE_DIR}/include + ${CMAKE_BINARY_DIR}/include ${AUTOGEN_DIR} ${Boost_INCLUDE_DIR} ${OPENSSL_INCLUDE_DIR} @@ -441,7 +452,8 @@ add_custom_target(format python ${BUILD_SUPPORT_DIR}/run_clang_format.py ${CMAKE_SOURCE_DIR}/perf ${CMAKE_SOURCE_DIR}/examples ${CMAKE_SOURCE_DIR}/tests - ${CMAKE_SOURCE_DIR}/include) + ${CMAKE_SOURCE_DIR}/include + ${CMAKE_SOURCE_DIR}/python/src) # `make check-format` option (for CI test) add_custom_target(check-format python ${BUILD_SUPPORT_DIR}/run_clang_format.py @@ -452,4 +464,5 @@ add_custom_target(check-format python ${BUILD_SUPPORT_DIR}/run_clang_format.py ${CMAKE_SOURCE_DIR}/perf ${CMAKE_SOURCE_DIR}/examples ${CMAKE_SOURCE_DIR}/tests - ${CMAKE_SOURCE_DIR}/include) + ${CMAKE_SOURCE_DIR}/include + ${CMAKE_SOURCE_DIR}/python/src) diff --git a/pulsar-client-cpp/docker-build-centos7.sh b/pulsar-client-cpp/docker-build-centos7.sh index 5ceeca9a1ab55..e97e374181b7c 100755 --- a/pulsar-client-cpp/docker-build-centos7.sh +++ b/pulsar-client-cpp/docker-build-centos7.sh @@ -32,7 +32,7 @@ cd - VOLUME_OPTION=${VOLUME_OPTION:-"-v $ROOT_DIR:/pulsar"} COMMAND="cd /pulsar/pulsar-client-cpp && mkdir -p _builds && cd _builds && - /opt/cmake/cmake-3.4.0-Linux-x86_64/bin/cmake .. -DBUILD_PYTHON_WRAPPER=OFF -DBUILD_TESTS=OFF && make" + /opt/cmake/cmake-3.4.0-Linux-x86_64/bin/cmake .. 
-DBUILD_PYTHON_WRAPPER=OFF -DBUILD_TESTS=ON && make -j8" DOCKER_CMD="docker run -i ${VOLUME_OPTION} ${IMAGE}" diff --git a/pulsar-client-cpp/docker-build-python3.9.sh b/pulsar-client-cpp/docker-build-python3.9.sh index 15cc2fdb102ee..db5c9abd82a99 100755 --- a/pulsar-client-cpp/docker-build-python3.9.sh +++ b/pulsar-client-cpp/docker-build-python3.9.sh @@ -29,16 +29,18 @@ cd $ROOT_DIR/pulsar-client-cpp # Build manylinux2014 build image PYTHON_VERSION="3.9" PYTHON_SPEC="cp39-cp39" -IMAGE_NAME=pulsar-build:manylinux-$PYTHON_SPEC +ARCH="x86_64" +IMAGE_NAME=pulsar-build:manylinux-$PYTHON_SPEC-$ARCH docker build -t $IMAGE_NAME ./docker/manylinux2014 \ --build-arg PYTHON_VERSION=$PYTHON_VERSION \ - --build-arg PYTHON_SPEC=$PYTHON_SPEC + --build-arg PYTHON_SPEC=$PYTHON_SPEC \ + --build-arg ARCH=$ARCH # Build wheel file BUILD_IMAGE_NAME="${BUILD_IMAGE_NAME:-pulsar-build}" -IMAGE=$BUILD_IMAGE_NAME:manylinux-$PYTHON_SPEC +IMAGE=$BUILD_IMAGE_NAME:manylinux-$PYTHON_SPEC-$ARCH VOLUME_OPTION=${VOLUME_OPTION:-"-v $ROOT_DIR:/pulsar"} COMMAND="/pulsar/pulsar-client-cpp/docker/build-wheel-file-within-docker.sh" diff --git a/pulsar-client-cpp/docker/alpine/Dockerfile b/pulsar-client-cpp/docker/alpine/Dockerfile index ef77284242cc7..d7c266646c8cd 100644 --- a/pulsar-client-cpp/docker/alpine/Dockerfile +++ b/pulsar-client-cpp/docker/alpine/Dockerfile @@ -43,12 +43,12 @@ RUN curl -O -L https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/ rm -rf /boost_1_72_0.tar.gz /boost_1_72_0 # ZLib -RUN curl -O -L https://zlib.net/zlib-1.2.11.tar.gz && \ - tar xfz zlib-1.2.11.tar.gz && \ - cd zlib-1.2.11 && \ +RUN curl -O -L https://zlib.net/fossils/zlib-1.2.13.tar.gz && \ + tar xfz zlib-1.2.13.tar.gz && \ + cd zlib-1.2.13 && \ CFLAGS="-fPIC -O3" ./configure && \ make -j4 && make install && \ - rm -rf /zlib-1.2.11.tar.gz /zlib-1.2.11 + rm -rf /zlib-1.2.13.tar.gz /zlib-1.2.13 # Compile OpenSSL RUN curl -O -L https://github.com/openssl/openssl/archive/OpenSSL_1_1_0j.tar.gz && \ diff --git 
a/pulsar-client-cpp/docker/alpine/Dockerfile-alpine-3.8 b/pulsar-client-cpp/docker/alpine/Dockerfile-alpine-3.8 index 0a9fbb40711b9..6b19326b9ddb5 100644 --- a/pulsar-client-cpp/docker/alpine/Dockerfile-alpine-3.8 +++ b/pulsar-client-cpp/docker/alpine/Dockerfile-alpine-3.8 @@ -43,12 +43,12 @@ RUN curl -O -L https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/ rm -rf /boost_1_72_0.tar.gz /boost_1_72_0 # ZLib -RUN curl -O -L https://zlib.net/zlib-1.2.11.tar.gz && \ - tar xfz zlib-1.2.11.tar.gz && \ - cd zlib-1.2.11 && \ +RUN curl -O -L https://zlib.net/fossils/zlib-1.2.13.tar.gz && \ + tar xfz zlib-1.2.13.tar.gz && \ + cd zlib-1.2.13 && \ CFLAGS="-fPIC -O3" ./configure && \ make -j4 && make install && \ - rm -rf /zlib-1.2.11.tar.gz /zlib-1.2.11 + rm -rf /zlib-1.2.13.tar.gz /zlib-1.2.13 # Compile OpenSSL RUN curl -O -L https://github.com/openssl/openssl/archive/OpenSSL_1_1_0j.tar.gz && \ diff --git a/pulsar-client-cpp/docker/build-wheel-file-within-docker.sh b/pulsar-client-cpp/docker/build-wheel-file-within-docker.sh index ade3ca02832d5..c04a3cc2c2eff 100755 --- a/pulsar-client-cpp/docker/build-wheel-file-within-docker.sh +++ b/pulsar-client-cpp/docker/build-wheel-file-within-docker.sh @@ -31,7 +31,7 @@ cmake . -DPYTHON_INCLUDE_DIR=/opt/python/$PYTHON_SPEC/include/python$PYTHON_VERS -DBUILD_TESTS=OFF make clean -make _pulsar -j3 VERBOSE=1 +make _pulsar -j3 cd python python setup.py bdist_wheel @@ -42,4 +42,4 @@ python setup.py bdist_wheel # Audit wheel will make sure no external dependencies are needed for # the shared library and that only symbols supported by most linux # distributions are used. 
-auditwheel repair dist/pulsar_client*-$PYTHON_SPEC-linux_x86_64.whl +auditwheel repair dist/pulsar_client*-$PYTHON_SPEC-linux_${ARCH}.whl diff --git a/pulsar-client-cpp/docker/build-wheels.sh b/pulsar-client-cpp/docker/build-wheels.sh index a61f21516eb05..25ac64f4846ec 100755 --- a/pulsar-client-cpp/docker/build-wheels.sh +++ b/pulsar-client-cpp/docker/build-wheels.sh @@ -27,33 +27,29 @@ BUILD_IMAGE_NAME="${BUILD_IMAGE_NAME:-apachepulsar/pulsar-build}" ROOT_DIR=`cd $(dirname $0)/../..; pwd` cd $ROOT_DIR -PYTHON_VERSIONS=( - '2.7 cp27-cp27mu' - '2.7 cp27-cp27m' - '3.5 cp35-cp35m' - '3.6 cp36-cp36m' - '3.7 cp37-cp37m' - '3.8 cp38-cp38' - '3.9 cp39-cp39' -) - -function contains() { - local n=$# - local value=${!n} - for ((i=1;i < $#;i++)) { - if [ "${!i}" == "${value}" ]; then - echo "y" - return 0 +source ./pulsar-client-cpp/docker/python-versions.sh + +function contains_build_version { + for line in "${PYTHON_VERSIONS[@]}"; do + read -r -a v <<< "$line" + value="${v[0]} ${v[1]} ${v[2]} ${v[3]}" + + if [ "${build_version}" == "${value}" ]; then + # found + res=1 + return fi - } - echo "n" - return 1 + done + + # not found + res=0 } if [ $# -ge 1 ]; then build_version=$@ - if [ $(contains "${PYTHON_VERSIONS[@]}" "${build_version}") == "y" ]; then + contains_build_version + if [ $res == 1 ]; then PYTHON_VERSIONS=( "${build_version}" ) @@ -69,9 +65,11 @@ for line in "${PYTHON_VERSIONS[@]}"; do read -r -a PY <<< "$line" PYTHON_VERSION=${PY[0]} PYTHON_SPEC=${PY[1]} - echo "--------- Build Python wheel for $PYTHON_VERSION -- $PYTHON_SPEC" + IMAGE=${PY[2]} + ARCH=${PY[3]} + echo "--------- Build Python wheel for $PYTHON_VERSION -- $IMAGE -- $PYTHON_SPEC -- $ARCH" - IMAGE=$BUILD_IMAGE_NAME:manylinux-$PYTHON_SPEC + IMAGE=$BUILD_IMAGE_NAME:${IMAGE}-$PYTHON_SPEC-$ARCH echo "Using image: $IMAGE" @@ -79,6 +77,6 @@ for line in "${PYTHON_VERSIONS[@]}"; do COMMAND="/pulsar/pulsar-client-cpp/docker/build-wheel-file-within-docker.sh" DOCKER_CMD="docker run -i ${VOLUME_OPTION} -e 
USE_FULL_POM_NAME -e NAME_POSTFIX ${IMAGE}" - $DOCKER_CMD bash -c "${COMMAND}" + $DOCKER_CMD bash -c "ARCH=$ARCH ${COMMAND}" done diff --git a/pulsar-client-cpp/docker/centos-7/Dockerfile b/pulsar-client-cpp/docker/centos-7/Dockerfile index 53ce9bdbba4f8..690e8f1f73f94 100644 --- a/pulsar-client-cpp/docker/centos-7/Dockerfile +++ b/pulsar-client-cpp/docker/centos-7/Dockerfile @@ -35,3 +35,10 @@ RUN mkdir -p /opt/cmake WORKDIR /opt/cmake RUN curl -L -O https://cmake.org/files/v3.4/cmake-3.4.0-Linux-x86_64.tar.gz \ && tar zxf cmake-3.4.0-Linux-x86_64.tar.gz + +# googletest +RUN curl -O -L https://github.com/google/googletest/archive/refs/tags/release-1.10.0.tar.gz \ + && tar zxf release-1.10.0.tar.gz \ + && cd googletest-release-1.10.0 \ + && mkdir build && cd build \ + && /opt/cmake/cmake-3.4.0-Linux-x86_64/bin/cmake .. && make install diff --git a/pulsar-client-cpp/docker/create-images.sh b/pulsar-client-cpp/docker/create-images.sh index 6aa1d69ef44dd..14938a40c2a9e 100755 --- a/pulsar-client-cpp/docker/create-images.sh +++ b/pulsar-client-cpp/docker/create-images.sh @@ -23,28 +23,22 @@ set -e -PYTHON_VERSIONS=( - '2.7 cp27-cp27mu manylinux1' - '2.7 cp27-cp27m manylinux1' - '3.5 cp35-cp35m manylinux2014' - '3.6 cp36-cp36m manylinux2014' - '3.7 cp37-cp37m manylinux2014' - '3.8 cp38-cp38 manylinux2014' - '3.9 cp39-cp39 manylinux2014' -) +source python-versions.sh for line in "${PYTHON_VERSIONS[@]}"; do read -r -a PY <<< "$line" PYTHON_VERSION=${PY[0]} PYTHON_SPEC=${PY[1]} BASE_IMAGE=${PY[2]} - echo "--------- Build Docker image for $PYTHON_VERSION -- $PYTHON_SPEC" + ARCH=${PY[3]} + echo "--------- Build Docker image for $PYTHON_VERSION -- $PYTHON_SPEC -- $ARCH" - IMAGE_NAME=pulsar-build:manylinux-$PYTHON_SPEC + IMAGE_NAME=pulsar-build:$BASE_IMAGE-$PYTHON_SPEC-$ARCH docker build -t $IMAGE_NAME $BASE_IMAGE \ --build-arg PYTHON_VERSION=$PYTHON_VERSION \ - --build-arg PYTHON_SPEC=$PYTHON_SPEC + --build-arg PYTHON_SPEC=$PYTHON_SPEC \ + --build-arg ARCH=$ARCH echo "==== 
Successfully built image $IMAGE_NAME" done diff --git a/pulsar-client-cpp/docker/manylinux1/Dockerfile b/pulsar-client-cpp/docker/manylinux1/Dockerfile index 0df92401a8f64..57c7c06d7ee14 100644 --- a/pulsar-client-cpp/docker/manylinux1/Dockerfile +++ b/pulsar-client-cpp/docker/manylinux1/Dockerfile @@ -46,12 +46,12 @@ RUN curl -O -L https://www.cpan.org/src/5.0/perl-5.10.0.tar.gz && \ #################################### # ZLib -RUN curl -O -L https://zlib.net/zlib-1.2.11.tar.gz && \ - tar xvfz zlib-1.2.11.tar.gz && \ - cd zlib-1.2.11 && \ +RUN curl -O -L https://zlib.net/fossils/zlib-1.2.13.tar.gz && \ + tar xvfz zlib-1.2.13.tar.gz && \ + cd zlib-1.2.13 && \ CFLAGS="-fPIC -O3" ./configure && \ make && make install && \ - rm -rf /zlib-1.2.11.tar.gz /zlib-1.2.11 + rm -rf /zlib-1.2.13.tar.gz /zlib-1.2.13 # Compile OpenSSL RUN curl -O -L https://github.com/openssl/openssl/archive/OpenSSL_1_1_0j.tar.gz && \ diff --git a/pulsar-client-cpp/docker/manylinux2014/Dockerfile b/pulsar-client-cpp/docker/manylinux2014/Dockerfile index 20a7247e30a40..31cc9a45a60d4 100644 --- a/pulsar-client-cpp/docker/manylinux2014/Dockerfile +++ b/pulsar-client-cpp/docker/manylinux2014/Dockerfile @@ -17,10 +17,8 @@ # under the License. 
# - -FROM quay.io/pypa/manylinux2014_x86_64 - -RUN yum install -y gtest-devel +ARG ARCH +FROM quay.io/pypa/manylinux2014_${ARCH} ARG PYTHON_VERSION ARG PYTHON_SPEC @@ -28,6 +26,10 @@ ARG PYTHON_SPEC ENV PYTHON_VERSION=${PYTHON_VERSION} ENV PYTHON_SPEC=${PYTHON_SPEC} +ARG ARCH +ENV ARCH=${ARCH} + + ENV PATH="/opt/python/${PYTHON_SPEC}/bin:${PATH}" RUN ln -s /opt/python/${PYTHON_SPEC}/include/python${PYTHON_VERSION}m /opt/python/${PYTHON_SPEC}/include/python${PYTHON_VERSION} @@ -46,84 +48,51 @@ RUN curl -O -L https://www.cpan.org/src/5.0/perl-5.10.0.tar.gz && \ #################################### # ZLib -RUN curl -O -L https://zlib.net/zlib-1.2.11.tar.gz && \ - tar xvfz zlib-1.2.11.tar.gz && \ - cd zlib-1.2.11 && \ +RUN curl -O -L https://zlib.net/fossils/zlib-1.2.13.tar.gz && \ + tar xvfz zlib-1.2.13.tar.gz && \ + cd zlib-1.2.13 && \ CFLAGS="-fPIC -O3" ./configure && \ make && make install && \ - rm -rf /zlib-1.2.11.tar.gz /zlib-1.2.11 + rm -rf /zlib-1.2.13.tar.gz /zlib-1.2.13 # Compile OpenSSL -RUN curl -O -L https://github.com/openssl/openssl/archive/OpenSSL_1_1_0j.tar.gz && \ - tar xvfz OpenSSL_1_1_0j.tar.gz && \ - cd openssl-OpenSSL_1_1_0j/ && \ - ./Configure -fPIC --prefix=/usr/local/ssl/ no-shared linux-x86_64 && \ - make && make install && \ - rm -rf /OpenSSL_1_1_0j.tar.gz /openssl-OpenSSL_1_1_0j +RUN curl -O -L https://github.com/openssl/openssl/archive/OpenSSL_1_1_1n.tar.gz && \ + tar xvfz OpenSSL_1_1_1n.tar.gz && \ + cd openssl-OpenSSL_1_1_1n/ && \ + ./Configure -fPIC --prefix=/usr/local/ssl/ no-shared linux-${ARCH} && \ + make -j8 && make install && \ + rm -rf /OpenSSL_1_1_1n.tar.gz /openssl-OpenSSL_1_1_1n # Download and compile boost -RUN curl -O -L https://boostorg.jfrog.io/artifactory/main/release/1.68.0/source/boost_1_68_0.tar.gz && \ - tar xvfz boost_1_68_0.tar.gz && \ - cd /boost_1_68_0 && \ +RUN curl -O -L https://boostorg.jfrog.io/artifactory/main/release/1.78.0/source/boost_1_78_0.tar.gz && \ + tar xvfz boost_1_78_0.tar.gz && \ + cd 
/boost_1_78_0 && \ ./bootstrap.sh --with-libraries=program_options,filesystem,regex,thread,system,python && \ - ./b2 address-model=64 cxxflags=-fPIC link=static threading=multi variant=release install && \ - rm -rf /boost_1_68_0.tar.gz /boost_1_68_0 + ./b2 address-model=64 cxxflags=-fPIC link=static threading=multi variant=release install -j8 && \ + rm -rf /boost_1_78_0.tar.gz /boost_1_78_0 # Download and copile protoubf RUN curl -O -L https://github.com/google/protobuf/releases/download/v3.3.0/protobuf-cpp-3.3.0.tar.gz && \ tar xvfz protobuf-cpp-3.3.0.tar.gz && \ cd protobuf-3.3.0/ && \ CXXFLAGS=-fPIC ./configure && \ - make && make install && ldconfig && \ + make -j8 && make install && ldconfig && \ rm -rf /protobuf-cpp-3.3.0.tar.gz /protobuf-3.3.0 -# Compile APR -RUN curl -O -L http://archive.apache.org/dist/apr/apr-1.5.2.tar.gz && \ - tar xvfz apr-1.5.2.tar.gz && \ - cd apr-1.5.2 && \ - CFLAGS=-fPIC CXXFLAGS=-fPIC ./configure && \ - make && make install && \ - rm -rf /apr-1.5.2.tar.gz /apr-1.5.2 - -# Compile APR-Util -RUN curl -O -L http://archive.apache.org/dist/apr/apr-util-1.5.4.tar.gz && \ - tar xvfz apr-util-1.5.4.tar.gz && \ - cd apr-util-1.5.4 && \ - CFLAGS=-fPIC CXXFLAGS=-fPIC ./configure -with-apr=/usr/local/apr && \ - make && make install && \ - rm -rf /apr-util-1.5.4.tar.gz /apr-util-1.5.4 - -# Libtool -RUN curl -L -O https://ftp.gnu.org/gnu/libtool/libtool-2.4.6.tar.gz && \ - tar xvfz libtool-2.4.6.tar.gz && \ - cd libtool-2.4.6 && \ - ./configure && \ - make && make install && \ - rm -rf /libtool-2.4.6.tar.gz /libtool-2.4.6 - -# Compile log4cxx -RUN curl -O -L https://github.com/apache/logging-log4cxx/archive/v0.11.0.tar.gz && \ - tar xvfz v0.11.0.tar.gz && \ - cd logging-log4cxx-0.11.0 && \ - ./autogen.sh && \ - CXXFLAGS=-fPIC ./configure && \ - make && make install && \ - rm -rf /v0.11.0.tar.gz /logging-log4cxx-0.11.0 - # Compile expat RUN curl -O -L https://github.com/libexpat/libexpat/archive/R_2_2_0.tar.gz && \ tar xfvz R_2_2_0.tar.gz && \ cd 
libexpat-R_2_2_0/expat && \ ./buildconf.sh && \ CFLAGS=-fPIC CXXFLAGS=-fPIC ./configure && \ - make && make installlib && \ + make -j8 && make installlib && \ rm -rf /R_2_2_0.tar.gz /libexpat-R_2_2_0 RUN curl -O -L https://github.com/Kitware/CMake/archive/v3.12.1.tar.gz && \ tar xvfz v3.12.1.tar.gz && \ cd CMake-3.12.1 && \ ./configure && \ - make && make install && \ + make -j8 && make install && \ rm -rf /v3.12.1.tar.gz /CMake-3.12.1 # Zstandard @@ -139,7 +108,7 @@ RUN curl -O -L https://github.com/google/snappy/releases/download/1.1.3/snappy-1 tar xvfz snappy-1.1.3.tar.gz && \ cd snappy-1.1.3 && \ CXXFLAGS="-fPIC -O3" ./configure && \ - make && make install && \ + make -j8 && make install && \ rm -rf /snappy-1.1.3 /snappy-1.1.3.tar.gz # LibCurl @@ -147,7 +116,7 @@ RUN curl -O -L https://github.com/curl/curl/releases/download/curl-7_61_0/curl- tar xvfz curl-7.61.0.tar.gz && \ cd curl-7.61.0 && \ CFLAGS=-fPIC ./configure --with-ssl=/usr/local/ssl/ && \ - make && make install && \ + make -j8 && make install && \ rm -rf /curl-7.61.0.tar.gz /curl-7.61.0 RUN pip install twine diff --git a/pulsar-client-cpp/docker/manylinux_musl/Dockerfile b/pulsar-client-cpp/docker/manylinux_musl/Dockerfile new file mode 100644 index 0000000000000..40417bcd79097 --- /dev/null +++ b/pulsar-client-cpp/docker/manylinux_musl/Dockerfile @@ -0,0 +1,116 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +ARG ARCH +FROM quay.io/pypa/musllinux_1_1_${ARCH} + +ARG PYTHON_VERSION +ARG PYTHON_SPEC + +ENV PYTHON_VERSION=${PYTHON_VERSION} +ENV PYTHON_SPEC=${PYTHON_SPEC} + +ARG ARCH +ENV ARCH=${ARCH} + + +ENV PATH="/opt/python/${PYTHON_SPEC}/bin:${PATH}" + +RUN ln -s /opt/python/${PYTHON_SPEC}/include/python${PYTHON_VERSION}m /opt/python/${PYTHON_SPEC}/include/python${PYTHON_VERSION} + +# Perl (required for building OpenSSL) +RUN curl -O -L https://www.cpan.org/src/5.0/perl-5.10.0.tar.gz && \ + tar xvfz perl-5.10.0.tar.gz && \ + cd perl-5.10.0 && \ + ./configure.gnu --prefix=/usr/local/ && \ + make && make install && \ + rm -rf /perl-5.10.0.tar.gz /perl-5.10.0 + +#################################### +# These dependencies can be found in Ubuntu but they're not compiled with -fPIC, +# so they cannot be statically linked into a shared library +#################################### + +# ZLib +RUN curl -O -L https://zlib.net/fossils/zlib-1.2.13.tar.gz && \ + tar xvfz zlib-1.2.13.tar.gz && \ + cd zlib-1.2.13 && \ + CFLAGS="-fPIC -O3" ./configure && \ + make -j8 && make install && \ + rm -rf /zlib-1.2.13.tar.gz /zlib-1.2.13 + +# Compile OpenSSL +RUN curl -O -L https://github.com/openssl/openssl/archive/OpenSSL_1_1_1n.tar.gz && \ + tar xvfz OpenSSL_1_1_1n.tar.gz && \ + cd openssl-OpenSSL_1_1_1n/ && \ + ./Configure -fPIC --prefix=/usr/local/ssl/ no-shared linux-${ARCH} && \ + make -j8 && make install && \ + rm -rf /OpenSSL_1_1_1n.tar.gz /openssl-OpenSSL_1_1_1n + +# Download and compile boost +RUN curl -O -L 
https://boostorg.jfrog.io/artifactory/main/release/1.78.0/source/boost_1_78_0.tar.gz && \ + tar xvfz boost_1_78_0.tar.gz && \ + cd /boost_1_78_0 && \ + ./bootstrap.sh --with-libraries=program_options,filesystem,regex,thread,system,python && \ + ./b2 address-model=64 cxxflags=-fPIC link=static threading=multi variant=release install -j8 && \ + rm -rf /boost_1_78_0.tar.gz /boost_1_78_0 + +# Download and copile protoubf +RUN curl -O -L https://github.com/google/protobuf/releases/download/v3.20.0/protobuf-cpp-3.20.0.tar.gz && \ + tar xvfz protobuf-cpp-3.20.0.tar.gz && \ + cd protobuf-3.20.0/ && \ + CXXFLAGS=-fPIC ./configure && \ + make -j8 && make install && \ + rm -rf /protobuf-cpp-3.20.0.tar.gz /protobuf-3.20.0 + +RUN apk add cmake + +# Zstandard +RUN curl -O -L https://github.com/facebook/zstd/releases/download/v1.3.7/zstd-1.3.7.tar.gz && \ + tar xvfz zstd-1.3.7.tar.gz && \ + cd zstd-1.3.7 && \ + CFLAGS="-fPIC -O3" make -j8 && \ + make install && \ + rm -rf /zstd-1.3.7 /zstd-1.3.7.tar.gz + +# Snappy +RUN curl -O -L https://github.com/google/snappy/releases/download/1.1.3/snappy-1.1.3.tar.gz && \ + tar xvfz snappy-1.1.3.tar.gz && \ + cd snappy-1.1.3 && \ + CXXFLAGS="-fPIC -O3" ./configure && \ + make -j8 && make install && \ + rm -rf /snappy-1.1.3 /snappy-1.1.3.tar.gz + +# LibCurl +RUN curl -O -L https://github.com/curl/curl/releases/download/curl-7_61_0/curl-7.61.0.tar.gz && \ + tar xvfz curl-7.61.0.tar.gz && \ + cd curl-7.61.0 && \ + CFLAGS=-fPIC ./configure --with-ssl=/usr/local/ssl/ && \ + make -j8 && make install && \ + rm -rf /curl-7.61.0.tar.gz /curl-7.61.0 + +RUN pip install twine +RUN pip install fastavro +RUN pip install six +RUN pip install enum34 + + +ENV PYTHON_INCLUDE_DIR /opt/python/${PYTHON_SPEC}/include +ENV PYTHON_LIBRARIES /opt/python/${PYTHON_SPEC}/lib/python${PYTHON_VERSION} +ENV OPENSSL_ROOT_DIR /usr/local/ssl/ diff --git a/pulsar-client-cpp/docker/push-images.sh b/pulsar-client-cpp/docker/push-images.sh index 0501670e52664..a1806fa29df92 
100755 --- a/pulsar-client-cpp/docker/push-images.sh +++ b/pulsar-client-cpp/docker/push-images.sh @@ -23,24 +23,18 @@ set -e -DOCKER_ORG=apachepulsar +source python-versions.sh -PYTHON_VERSIONS=( - '2.7 cp27-cp27mu' - '2.7 cp27-cp27m' - '3.5 cp35-cp35m' - '3.6 cp36-cp36m' - '3.7 cp37-cp37m' - '3.8 cp38-cp38' - '3.9 cp39-cp39' -) +DOCKER_ORG=apachepulsar for line in "${PYTHON_VERSIONS[@]}"; do read -r -a PY <<< "$line" PYTHON_VERSION=${PY[0]} PYTHON_SPEC=${PY[1]} - - IMAGE_NAME=pulsar-build:manylinux-$PYTHON_SPEC + BASE_IMAGE=${PY[2]} + ARCH=${PY[3]} + + IMAGE_NAME=pulsar-build:$BASE_IMAGE-$PYTHON_SPEC-$ARCH FULL_NAME=$DOCKER_ORG/$IMAGE_NAME echo "IMAGE_NAME: $IMAGE_NAME" diff --git a/pulsar-client-cpp/docker/python-versions.sh b/pulsar-client-cpp/docker/python-versions.sh new file mode 100644 index 0000000000000..246eae914a372 --- /dev/null +++ b/pulsar-client-cpp/docker/python-versions.sh @@ -0,0 +1,44 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +PYTHON_VERSIONS=( + '2.7 cp27-cp27mu manylinux1 x86_64' + '2.7 cp27-cp27m manylinux1 x86_64' + '3.5 cp35-cp35m manylinux1 x86_64' + '3.6 cp36-cp36m manylinux2014 x86_64' + '3.7 cp37-cp37m manylinux2014 x86_64' + '3.8 cp38-cp38 manylinux2014 x86_64' + '3.9 cp39-cp39 manylinux2014 x86_64' + '3.10 cp310-cp310 manylinux2014 x86_64' + '3.7 cp37-cp37m manylinux2014 aarch64' + '3.8 cp38-cp38 manylinux2014 aarch64' + '3.9 cp39-cp39 manylinux2014 aarch64' + '3.10 cp310-cp310 manylinux2014 aarch64' + + # Alpine compatible wheels + '3.7 cp37-cp37m manylinux_musl aarch64' + '3.8 cp38-cp38 manylinux_musl aarch64' + '3.9 cp39-cp39 manylinux_musl aarch64' + '3.10 cp310-cp310 manylinux_musl aarch64' + + '3.7 cp37-cp37m manylinux_musl x86_64' + '3.8 cp38-cp38 manylinux_musl x86_64' + '3.9 cp39-cp39 manylinux_musl x86_64' + '3.10 cp310-cp310 manylinux_musl x86_64' +) diff --git a/pulsar-client-cpp/include/pulsar/Consumer.h b/pulsar-client-cpp/include/pulsar/Consumer.h index e82d2c07fbcdf..0db8416ec6e4e 100644 --- a/pulsar-client-cpp/include/pulsar/Consumer.h +++ b/pulsar-client-cpp/include/pulsar/Consumer.h @@ -396,7 +396,6 @@ class PULSAR_PUBLIC Consumer { friend class PulsarFriend; friend class PulsarWrapper; - friend class PartitionedConsumerImpl; friend class MultiTopicsConsumerImpl; friend class ConsumerImpl; friend class ClientImpl; diff --git a/pulsar-client-cpp/include/pulsar/Message.h b/pulsar-client-cpp/include/pulsar/Message.h index 9cea48f26580d..935236bd5bb5b 100644 --- a/pulsar-client-cpp/include/pulsar/Message.h +++ b/pulsar-client-cpp/include/pulsar/Message.h @@ -179,7 +179,6 @@ class PULSAR_PUBLIC Message { Message(const MessageId& messageId, proto::MessageMetadata& metadata, SharedBuffer& payload, proto::SingleMessageMetadata& singleMetadata, const std::string& topicName); friend class PartitionedProducerImpl; - friend class PartitionedConsumerImpl; friend class MultiTopicsConsumerImpl; friend class MessageBuilder; friend class ConsumerImpl; diff --git 
a/pulsar-client-cpp/include/pulsar/MessageId.h b/pulsar-client-cpp/include/pulsar/MessageId.h index de64d1d822413..06be790c1ea4b 100644 --- a/pulsar-client-cpp/include/pulsar/MessageId.h +++ b/pulsar-client-cpp/include/pulsar/MessageId.h @@ -94,7 +94,6 @@ class PULSAR_PUBLIC MessageId { friend class MessageImpl; friend class Commands; friend class PartitionedProducerImpl; - friend class PartitionedConsumerImpl; friend class MultiTopicsConsumerImpl; friend class UnAckedMessageTrackerEnabled; friend class BatchAcknowledgementTracker; diff --git a/pulsar-client-cpp/include/pulsar/c/consumer_configuration.h b/pulsar-client-cpp/include/pulsar/c/consumer_configuration.h index efe353ad167c4..a11e11e480f52 100644 --- a/pulsar-client-cpp/include/pulsar/c/consumer_configuration.h +++ b/pulsar-client-cpp/include/pulsar/c/consumer_configuration.h @@ -275,6 +275,12 @@ PULSAR_PUBLIC void pulsar_consumer_set_subscription_initial_position( PULSAR_PUBLIC void pulsar_consumer_configuration_set_property(pulsar_consumer_configuration_t *conf, const char *name, const char *value); +PULSAR_PUBLIC void pulsar_consumer_configuration_set_priority_level( + pulsar_consumer_configuration_t *consumer_configuration, int priority_level); + +PULSAR_PUBLIC int pulsar_consumer_configuration_get_priority_level( + pulsar_consumer_configuration_t *consumer_configuration); + // const CryptoKeyReaderPtr getCryptoKeyReader() // // const; diff --git a/pulsar-client-cpp/lib/Version.h b/pulsar-client-cpp/include/pulsar/c/version.h similarity index 85% rename from pulsar-client-cpp/lib/Version.h rename to pulsar-client-cpp/include/pulsar/c/version.h index a274f47bb8888..ab63c8a708991 100644 --- a/pulsar-client-cpp/lib/Version.h +++ b/pulsar-client-cpp/include/pulsar/c/version.h @@ -16,11 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -#ifndef LIB_VERSION_H_ -#define LIB_VERSION_H_ -#ifndef _PULSAR_VERSION_ -#define _PULSAR_VERSION_ "1.17" -#endif +#pragma once -#endif /* LIB_VERSION_H_ */ +#include diff --git a/pulsar-client-cpp/lib/BatchMessageContainerBase.h b/pulsar-client-cpp/lib/BatchMessageContainerBase.h index 8a32d8e9dca8a..71eef5fab6287 100644 --- a/pulsar-client-cpp/lib/BatchMessageContainerBase.h +++ b/pulsar-client-cpp/lib/BatchMessageContainerBase.h @@ -112,6 +112,9 @@ class BatchMessageContainerBase : public boost::noncopyable { bool hasEnoughSpace(const Message& msg) const noexcept; bool isEmpty() const noexcept; + void processAndClear(std::function opSendMsgCallback, + FlushCallback flushCallback); + protected: // references to ProducerImpl's fields const std::string& topicName_; @@ -157,6 +160,29 @@ inline void BatchMessageContainerBase::resetStats() { sizeInBytes_ = 0; } +inline void BatchMessageContainerBase::processAndClear( + std::function opSendMsgCallback, FlushCallback flushCallback) { + if (isEmpty()) { + if (flushCallback) { + flushCallback(ResultOk); + } + } else { + const auto numBatches = getNumBatches(); + if (numBatches == 1) { + OpSendMsg opSendMsg; + Result result = createOpSendMsg(opSendMsg, flushCallback); + opSendMsgCallback(result, opSendMsg); + } else if (numBatches > 1) { + std::vector opSendMsgs; + std::vector results = createOpSendMsgs(opSendMsgs, flushCallback); + for (size_t i = 0; i < results.size(); i++) { + opSendMsgCallback(results[i], opSendMsgs[i]); + } + } // else numBatches is 0, do nothing + } + clear(); +} + inline std::ostream& operator<<(std::ostream& os, const BatchMessageContainerBase& container) { container.serialize(os); return os; diff --git a/pulsar-client-cpp/lib/CMakeLists.txt b/pulsar-client-cpp/lib/CMakeLists.txt index 6e970fa3c5277..ee9214eec8af5 100644 --- a/pulsar-client-cpp/lib/CMakeLists.txt +++ b/pulsar-client-cpp/lib/CMakeLists.txt @@ -20,7 +20,7 @@ file(GLOB PULSAR_SOURCES *.cc *.h lz4/*.cc lz4/*.h checksum/*.cc 
checksum/*.h stats/*.cc stats/*.h c/*.cc c/*.h auth/*.cc auth/*.h auth/athenz/*.cc auth/athenz/*.h) execute_process(COMMAND python ${CMAKE_SOURCE_DIR}/../src/get-project-version.py OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE PV) -set (CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -D_PULSAR_VERSION_=\\\"${PV}\\\"") +set (CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -D_PULSAR_VERSION_INTERNAL_=\\\"${PV}\\\"") if (NOT PROTOC_PATH) set(PROTOC_PATH protoc) diff --git a/pulsar-client-cpp/lib/ClientConnection.cc b/pulsar-client-cpp/lib/ClientConnection.cc index 6f947312f01e6..69f61ca0de415 100644 --- a/pulsar-client-cpp/lib/ClientConnection.cc +++ b/pulsar-client-cpp/lib/ClientConnection.cc @@ -37,6 +37,7 @@ #include "ProducerImpl.h" #include "ConsumerImpl.h" #include "checksum/ChecksumProvider.h" +#include "MessageIdUtil.h" DECLARE_LOG_OBJECT() @@ -160,30 +161,37 @@ ClientConnection::ClientConnection(const std::string& logicalAddress, const std: serverProtocolVersion_(ProtocolVersion_MIN), executor_(executor), resolver_(executor_->createTcpResolver()), - socket_(executor_->createSocket()), #if BOOST_VERSION >= 107000 - strand_(boost::asio::make_strand(executor_->io_service_->get_executor())), + strand_(boost::asio::make_strand(executor_->getIOService().get_executor())), #elif BOOST_VERSION >= 106600 - strand_(executor_->io_service_->get_executor()), + strand_(executor_->getIOService().get_executor()), #else - strand_(*(executor_->io_service_)), + strand_(executor_->getIOService()), #endif logicalAddress_(logicalAddress), physicalAddress_(physicalAddress), cnxString_("[ -> " + physicalAddress + "] "), incomingBuffer_(SharedBuffer::allocate(DefaultBufferSize)), - connectTimeoutTask_(std::make_shared(executor_->getIOService(), - clientConfiguration.getConnectionTimeout())), outgoingBuffer_(SharedBuffer::allocate(DefaultBufferSize)), - consumerStatsRequestTimer_(executor_->createDeadlineTimer()), maxPendingLookupRequest_(clientConfiguration.getConcurrentLookupRequest()) { + try { + 
socket_ = executor_->createSocket(); + connectTimeoutTask_ = std::make_shared(executor_->getIOService(), + clientConfiguration.getConnectionTimeout()); + consumerStatsRequestTimer_ = executor_->createDeadlineTimer(); + } catch (const boost::system::system_error& e) { + LOG_ERROR("Failed to initialize connection: " << e.what()); + close(); + return; + } + LOG_INFO(cnxString_ << "Create ClientConnection, timeout=" << clientConfiguration.getConnectionTimeout()); if (clientConfiguration.isUseTls()) { #if BOOST_VERSION >= 105400 boost::asio::ssl::context ctx(boost::asio::ssl::context::tlsv12_client); #else - boost::asio::ssl::context ctx(*executor_->io_service_, boost::asio::ssl::context::tlsv1_client); + boost::asio::ssl::context ctx(executor_->getIOService(), boost::asio::ssl::context::tlsv1_client); #endif Url serviceUrl; Url::parse(physicalAddress, serviceUrl); @@ -240,7 +248,7 @@ ClientConnection::ClientConnection(const std::string& logicalAddress, const std: } } - tlsSocket_ = executor_->createTlsSocket(socket_, ctx); + tlsSocket_ = ExecutorService::createTlsSocket(socket_, ctx); LOG_DEBUG("TLS SNI Host: " << serviceUrl.host()); if (!SSL_set_tlsext_host_name(tlsSocket_->native_handle(), serviceUrl.host().c_str())) { @@ -532,18 +540,25 @@ void ClientConnection::handleResolve(const boost::system::error_code& err, return; } - auto self = shared_from_this(); - connectTimeoutTask_->setCallback([this, self](const PeriodicTask::ErrorCode& ec) { - if (state_ != Ready) { - LOG_ERROR(cnxString_ << "Connection was not established in " << connectTimeoutTask_->getPeriodMs() - << " ms, close the socket"); + auto self = ClientConnectionWeakPtr(shared_from_this()); + + connectTimeoutTask_->setCallback([self](const PeriodicTask::ErrorCode& ec) { + ClientConnectionPtr ptr = self.lock(); + if (!ptr) { + // Connection was already destroyed + return; + } + + if (ptr->state_ != Ready) { + LOG_ERROR(ptr->cnxString_ << "Connection was not established in " + << 
ptr->connectTimeoutTask_->getPeriodMs() << " ms, close the socket"); PeriodicTask::ErrorCode err; - socket_->close(err); + ptr->socket_->close(err); if (err) { - LOG_WARN(cnxString_ << "Failed to close socket: " << err.message()); + LOG_WARN(ptr->cnxString_ << "Failed to close socket: " << err.message()); } } - connectTimeoutTask_->stop(); + ptr->connectTimeoutTask_->stop(); }); LOG_DEBUG(cnxString_ << "Connecting to " << endpointIterator->endpoint() << "..."); @@ -577,9 +592,9 @@ void ClientConnection::handleRead(const boost::system::error_code& err, size_t b if (err || bytesTransferred == 0) { if (err) { if (err == boost::asio::error::operation_aborted) { - LOG_DEBUG(cnxString_ << "Read failed: " << err.message()); + LOG_DEBUG(cnxString_ << "Read operation was canceled: " << err.message()); } else { - LOG_ERROR(cnxString_ << "Read operation was cancelled"); + LOG_ERROR(cnxString_ << "Read operation failed: " << err.message()); } } // else: bytesTransferred == 0, which means server has closed the connection close(); @@ -795,6 +810,10 @@ void ClientConnection::handleIncomingCommand() { } case Ready: { + // Since we are receiving data from the connection, we are assuming that for now the connection is + // still working well. 
+ havePendingPingRequest_ = false; + // Handle normal commands switch (incomingCmd_.type()) { case BaseCommand::SEND_RECEIPT: { @@ -895,13 +914,16 @@ void ClientConnection::handleIncomingCommand() { if (partitionMetadataResponse.has_error()) { LOG_ERROR(cnxString_ << "Failed partition-metadata lookup req_id: " << partitionMetadataResponse.request_id() - << " error: " << partitionMetadataResponse.error()); + << " error: " << partitionMetadataResponse.error() + << " msg: " << partitionMetadataResponse.message()); + checkServerError(partitionMetadataResponse.error()); + lookupDataPromise->setFailed(getResult(partitionMetadataResponse.error())); } else { LOG_ERROR(cnxString_ << "Failed partition-metadata lookup req_id: " << partitionMetadataResponse.request_id() << " with empty response: "); + lookupDataPromise->setFailed(ResultConnectError); } - lookupDataPromise->setFailed(ResultConnectError); } else { LookupDataResultPtr lookupResultPtr = std::make_shared(); lookupResultPtr->setPartitions(partitionMetadataResponse.partitions()); @@ -979,13 +1001,16 @@ void ClientConnection::handleIncomingCommand() { if (lookupTopicResponse.has_error()) { LOG_ERROR(cnxString_ << "Failed lookup req_id: " << lookupTopicResponse.request_id() - << " error: " << lookupTopicResponse.error()); + << " error: " << lookupTopicResponse.error() + << " msg: " << lookupTopicResponse.message()); + checkServerError(lookupTopicResponse.error()); + lookupDataPromise->setFailed(getResult(lookupTopicResponse.error())); } else { LOG_ERROR(cnxString_ << "Failed lookup req_id: " << lookupTopicResponse.request_id() << " with empty response: "); + lookupDataPromise->setFailed(ResultConnectError); } - lookupDataPromise->setFailed(ResultConnectError); } else { LOG_DEBUG(cnxString_ << "Received lookup response from server. 
req_id: " @@ -1065,7 +1090,7 @@ void ClientConnection::handleIncomingCommand() { PendingGetLastMessageIdRequestsMap::iterator it = pendingGetLastMessageIdRequests_.find(error.request_id()); if (it != pendingGetLastMessageIdRequests_.end()) { - Promise getLastMessageIdPromise = it->second; + auto getLastMessageIdPromise = it->second; pendingGetLastMessageIdRequests_.erase(it); lock.unlock(); @@ -1144,7 +1169,6 @@ void ClientConnection::handleIncomingCommand() { case BaseCommand::PONG: { LOG_DEBUG(cnxString_ << "Received response to ping message"); - havePendingPingRequest_ = false; break; } @@ -1184,15 +1208,18 @@ void ClientConnection::handleIncomingCommand() { pendingGetLastMessageIdRequests_.find(getLastMessageIdResponse.request_id()); if (it != pendingGetLastMessageIdRequests_.end()) { - Promise getLastMessageIdPromise = it->second; + auto getLastMessageIdPromise = it->second; pendingGetLastMessageIdRequests_.erase(it); lock.unlock(); - MessageIdData messageIdData = getLastMessageIdResponse.last_message_id(); - MessageId messageId = MessageId(messageIdData.partition(), messageIdData.ledgerid(), - messageIdData.entryid(), messageIdData.batch_index()); - - getLastMessageIdPromise.setValue(messageId); + if (getLastMessageIdResponse.has_consumer_mark_delete_position()) { + getLastMessageIdPromise.setValue( + {toMessageId(getLastMessageIdResponse.last_message_id()), + toMessageId(getLastMessageIdResponse.consumer_mark_delete_position())}); + } else { + getLastMessageIdPromise.setValue( + {toMessageId(getLastMessageIdResponse.last_message_id())}); + } } else { lock.unlock(); LOG_WARN( @@ -1491,13 +1518,10 @@ void ClientConnection::close(Result result) { return; } state_ = Disconnected; - boost::system::error_code err; - socket_->close(err); - if (err) { - LOG_WARN(cnxString_ << "Failed to close socket: " << err.message()); - } + closeSocket(); if (tlsSocket_) { + boost::system::error_code err; tlsSocket_->lowest_layer().close(err); if (err) { LOG_WARN(cnxString_ << 
"Failed to close TLS socket: " << err.message()); @@ -1529,7 +1553,9 @@ void ClientConnection::close(Result result) { consumerStatsRequestTimer_.reset(); } - connectTimeoutTask_->stop(); + if (connectTimeoutTask_) { + connectTimeoutTask_->stop(); + } lock.unlock(); LOG_INFO(cnxString_ << "Connection closed"); @@ -1601,9 +1627,10 @@ Commands::ChecksumType ClientConnection::getChecksumType() const { return getServerProtocolVersion() >= proto::v6 ? Commands::Crc32c : Commands::None; } -Future ClientConnection::newGetLastMessageId(uint64_t consumerId, uint64_t requestId) { +Future ClientConnection::newGetLastMessageId(uint64_t consumerId, + uint64_t requestId) { Lock lock(mutex_); - Promise promise; + Promise promise; if (isClosed()) { lock.unlock(); LOG_ERROR(cnxString_ << " Client is not connected to the broker"); @@ -1613,7 +1640,12 @@ Future ClientConnection::newGetLastMessageId(uint64_t consume pendingGetLastMessageIdRequests_.insert(std::make_pair(requestId, promise)); lock.unlock(); - sendRequestWithId(Commands::newGetLastMessageId(consumerId, requestId), requestId); + sendRequestWithId(Commands::newGetLastMessageId(consumerId, requestId), requestId) + .addListener([promise](Result result, const ResponseData& data) { + if (result != ResultOk) { + promise.setFailed(result); + } + }); return promise.getFuture(); } @@ -1634,4 +1666,29 @@ Future ClientConnection::newGetTopicsOfNamespace(con return promise.getFuture(); } +void ClientConnection::closeSocket() { + boost::system::error_code err; + if (socket_) { + socket_->close(err); + if (err) { + LOG_WARN(cnxString_ << "Failed to close socket: " << err.message()); + } + } +} + +void ClientConnection::checkServerError(const proto::ServerError& error) { + switch (error) { + case proto::ServerError::ServiceNotReady: + closeSocket(); + break; + case proto::ServerError::TooManyRequests: + // TODO: Implement maxNumberOfRejectedRequestPerConnection like + // https://github.com/apache/pulsar/pull/274 + closeSocket(); + 
break; + default: + break; + } +} + } // namespace pulsar diff --git a/pulsar-client-cpp/lib/ClientConnection.h b/pulsar-client-cpp/lib/ClientConnection.h index 48e6d57a0a23c..b615eaab2d08f 100644 --- a/pulsar-client-cpp/lib/ClientConnection.h +++ b/pulsar-client-cpp/lib/ClientConnection.h @@ -46,6 +46,7 @@ #include #include #include "lib/PeriodicTask.h" +#include "lib/GetLastMessageIdResponse.h" using namespace pulsar; @@ -156,7 +157,7 @@ class PULSAR_PUBLIC ClientConnection : public std::enable_shared_from_this newConsumerStats(uint64_t consumerId, uint64_t requestId); - Future newGetLastMessageId(uint64_t consumerId, uint64_t requestId); + Future newGetLastMessageId(uint64_t consumerId, uint64_t requestId); Future newGetTopicsOfNamespace(const std::string& nsName, uint64_t requestId); @@ -306,7 +307,7 @@ class PULSAR_PUBLIC ClientConnection : public std::enable_shared_from_this> PendingConsumerStatsMap; PendingConsumerStatsMap pendingConsumerStatsMap_; - typedef std::map> PendingGetLastMessageIdRequestsMap; + typedef std::map> PendingGetLastMessageIdRequestsMap; PendingGetLastMessageIdRequestsMap pendingGetLastMessageIdRequests_; typedef std::map> PendingGetNamespaceTopicsMap; @@ -339,6 +340,9 @@ class PULSAR_PUBLIC ClientConnection : public std::enable_shared_from_this #include #include @@ -34,6 +34,7 @@ #include #include #include +#include #ifdef USE_LOG4CXX #include "Log4CxxLogger.h" #endif @@ -179,9 +180,6 @@ void ClientImpl::handleCreateProducer(const Result result, const LookupDataResul producer->getProducerCreatedFuture().addListener( std::bind(&ClientImpl::handleProducerCreated, shared_from_this(), std::placeholders::_1, std::placeholders::_2, callback, producer)); - Lock lock(mutex_); - producers_.push_back(producer); - lock.unlock(); producer->start(); } else { LOG_ERROR("Error Checking/Getting Partition Metadata while creating producer on " @@ -192,7 +190,14 @@ void ClientImpl::handleCreateProducer(const Result result, const LookupDataResul void 
ClientImpl::handleProducerCreated(Result result, ProducerImplBaseWeakPtr producerBaseWeakPtr, CreateProducerCallback callback, ProducerImplBasePtr producer) { - callback(result, Producer(producer)); + if (result == ResultOk) { + Lock lock(mutex_); + producers_.push_back(producer); + lock.unlock(); + callback(result, Producer(producer)); + } else { + callback(result, {}); + } } void ClientImpl::createReaderAsync(const std::string& topic, const MessageId& startMessageId, @@ -235,10 +240,13 @@ void ClientImpl::handleReaderMetadataLookup(const Result result, const LookupDat ReaderImplPtr reader = std::make_shared(shared_from_this(), topicName->toString(), conf, getListenerExecutorProvider()->get(), callback); - reader->start(startMessageId); - - Lock lock(mutex_); - consumers_.push_back(reader->getConsumer()); + ConsumerImplBasePtr consumer = reader->getConsumer().lock(); + auto self = shared_from_this(); + reader->start(startMessageId, [this, self](const ConsumerImplBaseWeakPtr& weakConsumerPtr) { + Lock lock(mutex_); + consumers_.push_back(weakConsumerPtr); + lock.unlock(); + }); } void ClientImpl::subscribeWithRegexAsync(const std::string& regexPattern, const std::string& subscriptionName, @@ -285,9 +293,6 @@ void ClientImpl::createPatternMultiTopicsConsumer(const Result result, const Nam consumer->getConsumerCreatedFuture().addListener( std::bind(&ClientImpl::handleConsumerCreated, shared_from_this(), std::placeholders::_1, std::placeholders::_2, callback, consumer)); - Lock lock(mutex_); - consumers_.push_back(consumer); - lock.unlock(); consumer->start(); } else { LOG_ERROR("Error Getting topicsOfNameSpace while createPatternMultiTopicsConsumer: " << result); @@ -311,6 +316,7 @@ void ClientImpl::subscribeAsync(const std::vector& topics, const st return; } } + lock.unlock(); if (topicNamePtr) { std::string randomName = generateRandomName(); @@ -325,8 +331,6 @@ void ClientImpl::subscribeAsync(const std::vector& topics, const st 
consumer->getConsumerCreatedFuture().addListener(std::bind(&ClientImpl::handleConsumerCreated, shared_from_this(), std::placeholders::_1, std::placeholders::_2, callback, consumer)); - consumers_.push_back(consumer); - lock.unlock(); consumer->start(); } @@ -372,8 +376,9 @@ void ClientImpl::handleSubscribe(const Result result, const LookupDataResultPtr callback(ResultInvalidConfiguration, Consumer()); return; } - consumer = std::make_shared( - shared_from_this(), subscriptionName, topicName, partitionMetadata->getPartitions(), conf); + consumer = std::make_shared(shared_from_this(), topicName, + partitionMetadata->getPartitions(), + subscriptionName, conf, lookupServicePtr_); } else { auto consumerImpl = std::make_shared(shared_from_this(), topicName->toString(), subscriptionName, conf); @@ -383,9 +388,6 @@ void ClientImpl::handleSubscribe(const Result result, const LookupDataResultPtr consumer->getConsumerCreatedFuture().addListener( std::bind(&ClientImpl::handleConsumerCreated, shared_from_this(), std::placeholders::_1, std::placeholders::_2, callback, consumer)); - Lock lock(mutex_); - consumers_.push_back(consumer); - lock.unlock(); consumer->start(); } else { LOG_ERROR("Error Checking/Getting Partition Metadata while Subscribing on " << topicName->toString() @@ -396,7 +398,14 @@ void ClientImpl::handleSubscribe(const Result result, const LookupDataResultPtr void ClientImpl::handleConsumerCreated(Result result, ConsumerImplBaseWeakPtr consumerImplBaseWeakPtr, SubscribeCallback callback, ConsumerImplBasePtr consumer) { - callback(result, Consumer(consumer)); + if (result == ResultOk) { + Lock lock(mutex_); + consumers_.push_back(consumer); + lock.unlock(); + callback(result, Consumer(consumer)); + } else { + callback(result, {}); + } } Future ClientImpl::getConnection(const std::string& topic) { @@ -530,17 +539,29 @@ void ClientImpl::handleClose(Result result, SharedInt numberOfOpenHandlers, Resu } if (*numberOfOpenHandlers == 0) { Lock lock(mutex_); - state_ = 
Closed; - lock.unlock(); + if (state_ == Closed) { + LOG_DEBUG("Client is already shutting down, possible race condition in handleClose"); + return; + } else { + state_ = Closed; + lock.unlock(); + } LOG_DEBUG("Shutting down producers and consumers for client"); - shutdown(); - if (callback) { - if (closingError != ResultOk) { - LOG_DEBUG("Problem in closing client, could not close one or more consumers or producers"); + // handleClose() is called in ExecutorService's event loop, while shutdown() tried to wait the event + // loop exits. So here we use another thread to call shutdown(). + auto self = shared_from_this(); + std::thread shutdownTask{[this, self, callback] { + shutdown(); + if (callback) { + if (closingError != ResultOk) { + LOG_DEBUG( + "Problem in closing client, could not close one or more consumers or producers"); + } + callback(closingError); } - callback(closingError); - } + }}; + shutdownTask.detach(); } } @@ -576,11 +597,25 @@ void ClientImpl::shutdown() { return; } LOG_DEBUG("ConnectionPool is closed"); - ioExecutorProvider_->close(); + + // 500ms as the timeout is long enough because ExecutorService::close calls io_service::stop() internally + // and waits until io_service::run() in another thread returns, which should be as soon as possible after + // stop() is called. 
+ TimeoutProcessor timeoutProcessor{500}; + + timeoutProcessor.tik(); + ioExecutorProvider_->close(timeoutProcessor.getLeftTimeout()); + timeoutProcessor.tok(); LOG_DEBUG("ioExecutorProvider_ is closed"); - listenerExecutorProvider_->close(); + + timeoutProcessor.tik(); + listenerExecutorProvider_->close(timeoutProcessor.getLeftTimeout()); + timeoutProcessor.tok(); LOG_DEBUG("listenerExecutorProvider_ is closed"); - partitionListenerExecutorProvider_->close(); + + timeoutProcessor.tik(); + partitionListenerExecutorProvider_->close(timeoutProcessor.getLeftTimeout()); + timeoutProcessor.tok(); LOG_DEBUG("partitionListenerExecutorProvider_ is closed"); } diff --git a/pulsar-client-cpp/lib/Commands.cc b/pulsar-client-cpp/lib/Commands.cc index 54c8c65f0c71b..33db6e42a13d2 100644 --- a/pulsar-client-cpp/lib/Commands.cc +++ b/pulsar-client-cpp/lib/Commands.cc @@ -18,7 +18,7 @@ */ #include "Commands.h" #include "MessageImpl.h" -#include "Version.h" +#include "VersionInternal.h" #include "pulsar/MessageBuilder.h" #include "LogUtils.h" #include "PulsarApi.pb.h" @@ -215,7 +215,7 @@ SharedBuffer Commands::newConnect(const AuthenticationPtr& authentication, const BaseCommand cmd; cmd.set_type(BaseCommand::CONNECT); CommandConnect* connect = cmd.mutable_connect(); - connect->set_client_version(_PULSAR_VERSION_); + connect->set_client_version(_PULSAR_VERSION_INTERNAL_); connect->set_auth_method_name(authentication->getAuthMethodName()); connect->set_protocol_version(ProtocolVersion_MAX); @@ -243,7 +243,7 @@ SharedBuffer Commands::newAuthResponse(const AuthenticationPtr& authentication, BaseCommand cmd; cmd.set_type(BaseCommand::AUTH_RESPONSE); CommandAuthResponse* authResponse = cmd.mutable_authresponse(); - authResponse->set_client_version(_PULSAR_VERSION_); + authResponse->set_client_version(_PULSAR_VERSION_INTERNAL_); AuthData* authData = authResponse->mutable_response(); authData->set_auth_method_name(authentication->getAuthMethodName()); @@ -687,26 +687,35 @@ void 
Commands::initBatchMessageMetadata(const Message& msg, pulsar::proto::Messa batchMetadata.add_replicate_to(metadata.replicate_to(i)); } } - // TODO: set other optional fields + if (metadata.has_schema_version()) { + batchMetadata.set_schema_version(metadata.schema_version()); + } } uint64_t Commands::serializeSingleMessageInBatchWithPayload(const Message& msg, SharedBuffer& batchPayLoad, unsigned long maxMessageSizeInBytes) { + const auto& msgMetadata = msg.impl_->metadata; SingleMessageMetadata metadata; - if (msg.impl_->hasPartitionKey()) { - metadata.set_partition_key(msg.impl_->getPartitionKey()); + if (msgMetadata.has_partition_key()) { + metadata.set_partition_key(msgMetadata.partition_key()); + } + if (msgMetadata.has_ordering_key()) { + metadata.set_ordering_key(msgMetadata.ordering_key()); } - for (MessageBuilder::StringMap::const_iterator it = msg.impl_->properties().begin(); - it != msg.impl_->properties().end(); it++) { - proto::KeyValue* keyValue = proto::KeyValue().New(); - keyValue->set_key(it->first); - keyValue->set_value(it->second); + metadata.mutable_properties()->Reserve(msgMetadata.properties_size()); + for (int i = 0; i < msgMetadata.properties_size(); i++) { + auto keyValue = proto::KeyValue().New(); + *keyValue = msgMetadata.properties(i); metadata.mutable_properties()->AddAllocated(keyValue); } - if (msg.impl_->getEventTimestamp() != 0) { - metadata.set_event_time(msg.impl_->getEventTimestamp()); + if (msgMetadata.has_event_time()) { + metadata.set_event_time(msgMetadata.event_time()); + } + + if (msgMetadata.has_sequence_id()) { + metadata.set_sequence_id(msgMetadata.sequence_id()); } // Format of batch message @@ -736,7 +745,7 @@ uint64_t Commands::serializeSingleMessageInBatchWithPayload(const Message& msg, batchPayLoad.bytesWritten(msgMetadataSize); batchPayLoad.write(msg.impl_->payload.data(), payloadSize); - return msg.impl_->metadata.sequence_id(); + return msgMetadata.sequence_id(); } Message 
Commands::deSerializeSingleMessageInBatch(Message& batchedMessage, int32_t batchIndex) { diff --git a/pulsar-client-cpp/lib/ConsumerImpl.cc b/pulsar-client-cpp/lib/ConsumerImpl.cc index 77c0fa9d52a94..111b9193aec86 100644 --- a/pulsar-client-cpp/lib/ConsumerImpl.cc +++ b/pulsar-client-cpp/lib/ConsumerImpl.cc @@ -25,6 +25,7 @@ #include "pulsar/Result.h" #include "pulsar/MessageId.h" #include "Utils.h" +#include "MessageIdUtil.h" #include "AckGroupingTracker.h" #include "AckGroupingTrackerEnabled.h" #include "AckGroupingTrackerDisabled.h" @@ -51,7 +52,6 @@ ConsumerImpl::ConsumerImpl(const ClientImplPtr client, const std::string& topic, hasParent_(hasParent), consumerTopicType_(consumerTopicType), subscriptionMode_(subscriptionMode), - startMessageId_(startMessageId), // This is the initial capacity of the queue incomingMessages_(std::max(config_.getReceiverQueueSize(), 1)), availablePermits_(0), @@ -63,7 +63,7 @@ ConsumerImpl::ConsumerImpl(const ClientImplPtr client, const std::string& topic, negativeAcksTracker_(client, *this, conf), ackGroupingTrackerPtr_(std::make_shared()), readCompacted_(conf.isReadCompacted()), - lastMessageInBroker_(Optional::of(MessageId())) { + startMessageId_(startMessageId) { std::stringstream consumerStrStream; consumerStrStream << "[" << topic_ << ", " << subscription_ << ", " << consumerId_ << "] "; consumerStr_ = consumerStrStream.str(); @@ -158,9 +158,7 @@ void ConsumerImpl::start() { } void ConsumerImpl::connectionOpened(const ClientConnectionPtr& cnx) { - Lock lock(mutex_); if (state_ == Closed) { - lock.unlock(); LOG_DEBUG(getName() << "connectionOpened : Consumer is already closed"); return; } @@ -169,23 +167,24 @@ void ConsumerImpl::connectionOpened(const ClientConnectionPtr& cnx) { // sending the subscribe request. 
cnx->registerConsumer(consumerId_, shared_from_this()); + Lock lockForMessageId(mutexForMessageId_); Optional firstMessageInQueue = clearReceiveQueue(); - unAckedMessageTrackerPtr_->clear(); - batchAcknowledgementTracker_.clear(); - if (subscriptionMode_ == Commands::SubscriptionModeNonDurable) { // Update startMessageId so that we can discard messages after delivery // restarts startMessageId_ = firstMessageInQueue; } + const auto startMessageId = startMessageId_; + lockForMessageId.unlock(); - lock.unlock(); + unAckedMessageTrackerPtr_->clear(); + batchAcknowledgementTracker_.clear(); ClientImplPtr client = client_.lock(); uint64_t requestId = client->newRequestId(); SharedBuffer cmd = Commands::newSubscribe( topic_, subscription_, consumerId_, requestId, getSubType(), consumerName_, subscriptionMode_, - startMessageId_, readCompacted_, config_.getProperties(), config_.getSchema(), getInitialPosition(), + startMessageId, readCompacted_, config_.getProperties(), config_.getSchema(), getInitialPosition(), config_.isReplicateSubscriptionStateEnabled(), config_.getKeySharedPolicy(), config_.getPriorityLevel()); cnx->sendRequestWithId(cmd, requestId) @@ -198,7 +197,6 @@ void ConsumerImpl::connectionFailed(Result result) { ConsumerImplPtr ptr = shared_from_this(); if (consumerCreatedPromise_.setFailed(result)) { - Lock lock(mutex_); state_ = Failed; } } @@ -270,15 +268,15 @@ void ConsumerImpl::handleCreateConsumer(const ClientConnectionPtr& cnx, Result r void ConsumerImpl::unsubscribeAsync(ResultCallback callback) { LOG_INFO(getName() << "Unsubscribing"); - Lock lock(mutex_); if (state_ != Ready) { - lock.unlock(); callback(ResultAlreadyClosed); LOG_ERROR(getName() << "Can not unsubscribe a closed subscription, please call subscribe again and " "then call unsubscribe"); return; } + Lock lock(mutex_); + ClientConnectionPtr cnx = getCnx().lock(); if (cnx) { LOG_DEBUG(getName() << "Unsubscribe request sent for consumer - " << consumerId_); @@ -299,7 +297,6 @@ void 
ConsumerImpl::unsubscribeAsync(ResultCallback callback) { void ConsumerImpl::handleUnsubscribe(Result result, ResultCallback callback) { if (result == ResultOk) { - Lock lock(mutex_); state_ = Closed; LOG_INFO(getName() << "Unsubscribed successfully"); } else { @@ -440,6 +437,9 @@ uint32_t ConsumerImpl::receiveIndividualMessagesFromBatch(const ClientConnection batchAcknowledgementTracker_.receivedMessage(batchedMessage); LOG_DEBUG("Received Batch messages of size - " << batchSize << " -- msgId: " << batchedMessage.getMessageId()); + Lock lock(mutexForMessageId_); + const auto startMessageId = startMessageId_; + lock.unlock(); int skippedMessages = 0; @@ -449,14 +449,14 @@ uint32_t ConsumerImpl::receiveIndividualMessagesFromBatch(const ClientConnection msg.impl_->setRedeliveryCount(redeliveryCount); msg.impl_->setTopicName(batchedMessage.getTopicName()); - if (startMessageId_.is_present()) { + if (startMessageId.is_present()) { const MessageId& msgId = msg.getMessageId(); // If we are receiving a batch message, we need to discard messages that were prior // to the startMessageId - if (msgId.ledgerId() == startMessageId_.value().ledgerId() && - msgId.entryId() == startMessageId_.value().entryId() && - msgId.batchIndex() <= startMessageId_.value().batchIndex()) { + if (msgId.ledgerId() == startMessageId.value().ledgerId() && + msgId.entryId() == startMessageId.value().entryId() && + msgId.batchIndex() <= startMessageId.value().batchIndex()) { LOG_DEBUG(getName() << "Ignoring message from before the startMessageId" << msg.getMessageId()); ++skippedMessages; @@ -587,7 +587,7 @@ void ConsumerImpl::internalListener() { trackMessage(msg); try { consumerStatsBasePtr_->receivedMessage(msg, ResultOk); - lastDequedMessage_ = Optional::of(msg.getMessageId()); + lastDequedMessageId_ = msg.getMessageId(); messageListener_(Consumer(shared_from_this()), msg); } catch (const std::exception& e) { LOG_ERROR(getName() << "Exception thrown from listener" << e.what()); @@ -645,12 +645,10 
@@ void ConsumerImpl::receiveAsync(ReceiveCallback& callback) { Message msg; // fail the callback if consumer is closing or closed - Lock stateLock(mutex_); if (state_ != Ready) { callback(ResultAlreadyClosed, msg); return; } - stateLock.unlock(); Lock lock(pendingReceiveMutex_); if (incomingMessages_.pop(msg, std::chrono::milliseconds(0))) { @@ -668,12 +666,10 @@ void ConsumerImpl::receiveAsync(ReceiveCallback& callback) { } Result ConsumerImpl::receiveHelper(Message& msg) { - { - Lock lock(mutex_); - if (state_ != Ready) { - return ResultAlreadyClosed; - } + if (state_ != Ready) { + return ResultAlreadyClosed; } + if (messageListener_) { LOG_ERROR(getName() << "Can not receive when a listener has been set"); return ResultInvalidConfiguration; @@ -700,11 +696,8 @@ Result ConsumerImpl::receiveHelper(Message& msg, int timeout) { return ResultInvalidConfiguration; } - { - Lock lock(mutex_); - if (state_ != Ready) { - return ResultAlreadyClosed; - } + if (state_ != Ready) { + return ResultAlreadyClosed; } if (messageListener_) { @@ -721,8 +714,9 @@ Result ConsumerImpl::receiveHelper(Message& msg, int timeout) { } void ConsumerImpl::messageProcessed(Message& msg, bool track) { - Lock lock(mutex_); - lastDequedMessage_ = Optional::of(msg.getMessageId()); + Lock lock(mutexForMessageId_); + lastDequedMessageId_ = msg.getMessageId(); + lock.unlock(); ClientConnectionPtr currentCnx = getCnx().lock(); if (currentCnx && msg.impl_->cnx_ != currentCnx.get()) { @@ -754,11 +748,11 @@ Optional ConsumerImpl::clearReceiveQueue() { previousMessageId = MessageId(-1, nextMessageId.ledgerId(), nextMessageId.entryId() - 1, -1); } return Optional::of(previousMessageId); - } else if (lastDequedMessage_.is_present()) { + } else if (lastDequedMessageId_ != MessageId::earliest()) { // If the queue was empty we need to restart from the message just after the last one that has been // dequeued // in the past - return lastDequedMessage_; + return Optional::of(lastDequedMessageId_); } else { // 
No message was received or dequeued by this consumer. Next message would still be the // startMessageId @@ -878,13 +872,10 @@ void ConsumerImpl::disconnectConsumer() { } void ConsumerImpl::closeAsync(ResultCallback callback) { - Lock lock(mutex_); - // Keep a reference to ensure object is kept alive ConsumerImplPtr ptr = shared_from_this(); if (state_ != Ready) { - lock.unlock(); if (callback) { callback(ResultAlreadyClosed); } @@ -902,7 +893,6 @@ void ConsumerImpl::closeAsync(ResultCallback callback) { ClientConnectionPtr cnx = getCnx().lock(); if (!cnx) { state_ = Closed; - lock.unlock(); // If connection is gone, also the consumer is closed on the broker side if (callback) { callback(ResultOk); @@ -913,7 +903,6 @@ void ConsumerImpl::closeAsync(ResultCallback callback) { ClientImplPtr client = client_.lock(); if (!client) { state_ = Closed; - lock.unlock(); // Client was already destroyed if (callback) { callback(ResultOk); @@ -921,8 +910,6 @@ void ConsumerImpl::closeAsync(ResultCallback callback) { return; } - // Lock is no longer required - lock.unlock(); int requestId = client->newRequestId(); Future future = cnx->sendRequestWithId(Commands::newCloseConsumer(consumerId_, requestId), requestId); @@ -938,9 +925,7 @@ void ConsumerImpl::closeAsync(ResultCallback callback) { void ConsumerImpl::handleClose(Result result, ResultCallback callback, ConsumerImplPtr consumer) { if (result == ResultOk) { - Lock lock(mutex_); state_ = Closed; - lock.unlock(); ClientConnectionPtr cnx = getCnx().lock(); if (cnx) { @@ -960,22 +945,14 @@ void ConsumerImpl::handleClose(Result result, ResultCallback callback, ConsumerI const std::string& ConsumerImpl::getName() const { return consumerStr_; } void ConsumerImpl::shutdown() { - Lock lock(mutex_); state_ = Closed; - lock.unlock(); consumerCreatedPromise_.setFailed(ResultAlreadyClosed); } -bool ConsumerImpl::isClosed() { - Lock lock(mutex_); - return state_ == Closed; -} +bool ConsumerImpl::isClosed() { return state_ == Closed; } 
-bool ConsumerImpl::isOpen() { - Lock lock(mutex_); - return state_ == Ready; -} +bool ConsumerImpl::isOpen() { return state_ == Ready; } Result ConsumerImpl::pauseMessageListener() { if (!messageListener_) { @@ -1038,14 +1015,13 @@ void ConsumerImpl::redeliverMessages(const std::set& messageIds) { int ConsumerImpl::getNumOfPrefetchedMessages() const { return incomingMessages_.size(); } void ConsumerImpl::getBrokerConsumerStatsAsync(BrokerConsumerStatsCallback callback) { - Lock lock(mutex_); if (state_ != Ready) { LOG_ERROR(getName() << "Client connection is not open, please try again later.") - lock.unlock(); callback(ResultConsumerNotInitialized, BrokerConsumerStats()); return; } + Lock lock(mutex_); if (brokerConsumerStats_.isValid()) { LOG_DEBUG(getName() << "Serving data from cache"); BrokerConsumerStatsImpl brokerConsumerStats = brokerConsumerStats_; @@ -1094,6 +1070,9 @@ void ConsumerImpl::brokerConsumerStatsListener(Result res, BrokerConsumerStatsIm void ConsumerImpl::handleSeek(Result result, ResultCallback callback) { if (result == ResultOk) { + Lock lock(mutexForMessageId_); + lastDequedMessageId_ = MessageId::earliest(); + lock.unlock(); LOG_INFO(getName() << "Seek successfully"); } else { LOG_ERROR(getName() << "Failed to seek: " << strResult(result)); @@ -1102,16 +1081,14 @@ void ConsumerImpl::handleSeek(Result result, ResultCallback callback) { } void ConsumerImpl::seekAsync(const MessageId& msgId, ResultCallback callback) { - Lock lock(mutex_); - if (state_ == Closed || state_ == Closing) { - lock.unlock(); + const auto state = state_.load(); + if (state == Closed || state == Closing) { LOG_ERROR(getName() << "Client connection already closed."); if (callback) { callback(ResultAlreadyClosed); } return; } - lock.unlock(); this->ackGroupingTrackerPtr_->flushAndClean(); ClientConnectionPtr cnx = getCnx().lock(); @@ -1135,16 +1112,14 @@ void ConsumerImpl::seekAsync(const MessageId& msgId, ResultCallback callback) { } void 
ConsumerImpl::seekAsync(uint64_t timestamp, ResultCallback callback) { - Lock lock(mutex_); - if (state_ == Closed || state_ == Closing) { - lock.unlock(); + const auto state = state_.load(); + if (state == Closed || state == Closing) { LOG_ERROR(getName() << "Client connection already closed."); if (callback) { callback(ResultAlreadyClosed); } return; } - lock.unlock(); ClientConnectionPtr cnx = getCnx().lock(); if (cnx) { @@ -1168,51 +1143,54 @@ void ConsumerImpl::seekAsync(uint64_t timestamp, ResultCallback callback) { bool ConsumerImpl::isReadCompacted() { return readCompacted_; } +inline bool hasMoreMessages(const MessageId& lastMessageIdInBroker, const MessageId& messageId) { + return lastMessageIdInBroker > messageId && lastMessageIdInBroker.entryId() != -1; +} + void ConsumerImpl::hasMessageAvailableAsync(HasMessageAvailableCallback callback) { - MessageId lastDequed = this->lastMessageIdDequed(); - MessageId lastInBroker = this->lastMessageIdInBroker(); - if (lastInBroker > lastDequed && lastInBroker.entryId() != -1) { - callback(ResultOk, true); - return; - } + Lock lock(mutexForMessageId_); + const auto messageId = + (lastDequedMessageId_ == MessageId::earliest()) ? 
startMessageId_.value() : lastDequedMessageId_; - getLastMessageIdAsync([lastDequed, callback](Result result, MessageId messageId) { - if (result == ResultOk) { - if (messageId > lastDequed && messageId.entryId() != -1) { - callback(ResultOk, true); + if (messageId == MessageId::latest()) { + lock.unlock(); + getLastMessageIdAsync([callback](Result result, const GetLastMessageIdResponse& response) { + if (result != ResultOk) { + callback(result, {}); + return; + } + if (response.hasMarkDeletePosition() && response.getLastMessageId().entryId() >= 0) { + // We only care about comparing ledger ids and entry ids as mark delete position doesn't have + // other ids such as batch index + callback(ResultOk, compareLedgerAndEntryId(response.getMarkDeletePosition(), + response.getLastMessageId()) < 0); } else { callback(ResultOk, false); } - } else { - callback(result, false); - } - }); -} - -void ConsumerImpl::brokerGetLastMessageIdListener(Result res, MessageId messageId, - BrokerGetLastMessageIdCallback callback) { - Lock lock(mutex_); - if (messageId > lastMessageIdInBroker()) { - lastMessageInBroker_ = Optional::of(messageId); - lock.unlock(); - callback(res, messageId); + }); } else { + if (hasMoreMessages(lastMessageIdInBroker_, messageId)) { + lock.unlock(); + callback(ResultOk, true); + return; + } lock.unlock(); - callback(res, lastMessageIdInBroker()); + + getLastMessageIdAsync([callback, messageId](Result result, const GetLastMessageIdResponse& response) { + callback(result, (result == ResultOk) && hasMoreMessages(response.getLastMessageId(), messageId)); + }); } } void ConsumerImpl::getLastMessageIdAsync(BrokerGetLastMessageIdCallback callback) { - Lock lock(mutex_); - if (state_ == Closed || state_ == Closing) { - lock.unlock(); + const auto state = state_.load(); + if (state == Closed || state == Closing) { LOG_ERROR(getName() << "Client connection already closed."); if (callback) { callback(ResultAlreadyClosed, MessageId()); } return; } - lock.unlock(); 
ClientConnectionPtr cnx = getCnx().lock(); if (cnx) { @@ -1222,9 +1200,19 @@ void ConsumerImpl::getLastMessageIdAsync(BrokerGetLastMessageIdCallback callback LOG_DEBUG(getName() << " Sending getLastMessageId Command for Consumer - " << getConsumerId() << ", requestId - " << requestId); + auto self = shared_from_this(); cnx->newGetLastMessageId(consumerId_, requestId) - .addListener(std::bind(&ConsumerImpl::brokerGetLastMessageIdListener, shared_from_this(), - std::placeholders::_1, std::placeholders::_2, callback)); + .addListener([this, self, callback](Result result, const GetLastMessageIdResponse& response) { + if (result == ResultOk) { + LOG_DEBUG(getName() << "getLastMessageId: " << response); + Lock lock(mutexForMessageId_); + lastMessageIdInBroker_ = response.getLastMessageId(); + lock.unlock(); + } else { + LOG_ERROR(getName() << "Failed to getLastMessageId: " << result); + } + callback(result, response); + }); } else { LOG_ERROR(getName() << " Operation not supported since server protobuf version " << cnx->getServerProtocolVersion() << " is older than proto::v12"); @@ -1248,10 +1236,7 @@ void ConsumerImpl::trackMessage(const Message& msg) { } } -bool ConsumerImpl::isConnected() const { - Lock lock(mutex_); - return !getCnx().expired() && state_ == Ready; -} +bool ConsumerImpl::isConnected() const { return !getCnx().expired() && state_ == Ready; } uint64_t ConsumerImpl::getNumberOfConnectedConsumer() { return isConnected() ? 
1 : 0; } diff --git a/pulsar-client-cpp/lib/ConsumerImpl.h b/pulsar-client-cpp/lib/ConsumerImpl.h index 0754a89fdf5fc..4db868c9f853a 100644 --- a/pulsar-client-cpp/lib/ConsumerImpl.h +++ b/pulsar-client-cpp/lib/ConsumerImpl.h @@ -33,6 +33,7 @@ #include "lib/UnAckedMessageTrackerDisabled.h" #include "MessageCrypto.h" #include "AckGroupingTracker.h" +#include "GetLastMessageIdResponse.h" #include "CompressionCodec.h" #include @@ -53,7 +54,7 @@ class ExecutorService; class ConsumerImpl; class BatchAcknowledgementTracker; typedef std::shared_ptr MessageCryptoPtr; -typedef std::function BrokerGetLastMessageIdCallback; +typedef std::function BrokerGetLastMessageIdCallback; enum ConsumerTopicType { @@ -191,10 +192,8 @@ class ConsumerImpl : public ConsumerImplBase, bool hasParent_; ConsumerTopicType consumerTopicType_; - Commands::SubscriptionMode subscriptionMode_; - Optional startMessageId_; + const Commands::SubscriptionMode subscriptionMode_; - Optional lastDequedMessage_; UnboundedBlockingQueue incomingMessages_; std::queue pendingReceives_; std::atomic_int availablePermits_; @@ -215,23 +214,16 @@ class ConsumerImpl : public ConsumerImplBase, MessageCryptoPtr msgCrypto_; const bool readCompacted_; - Optional lastMessageInBroker_; - void brokerGetLastMessageIdListener(Result res, MessageId messageId, - BrokerGetLastMessageIdCallback callback); - - const MessageId& lastMessageIdDequed() { - return lastDequedMessage_.is_present() ? lastDequedMessage_.value() : MessageId::earliest(); - } - - const MessageId& lastMessageIdInBroker() { - return lastMessageInBroker_.is_present() ? 
lastMessageInBroker_.value() : MessageId::earliest(); - } + // Make the access to `startMessageId_`, `lastDequedMessageId_` and `lastMessageIdInBroker_` thread safe + mutable std::mutex mutexForMessageId_; + Optional startMessageId_; + MessageId lastDequedMessageId_{MessageId::earliest()}; + MessageId lastMessageIdInBroker_{MessageId::earliest()}; friend class PulsarFriend; // these two declared friend to access setNegativeAcknowledgeEnabledForTesting friend class MultiTopicsConsumerImpl; - friend class PartitionedConsumerImpl; FRIEND_TEST(ConsumerTest, testPartitionedConsumerUnAckedMessageRedelivery); FRIEND_TEST(ConsumerTest, testMultiTopicsConsumerUnAckedMessageRedelivery); diff --git a/pulsar-client-cpp/lib/ExecutorService.cc b/pulsar-client-cpp/lib/ExecutorService.cc index 4db31124fc0e6..b9b5ed464784d 100644 --- a/pulsar-client-cpp/lib/ExecutorService.cc +++ b/pulsar-client-cpp/lib/ExecutorService.cc @@ -21,28 +21,52 @@ #include #include #include +#include "TimeUtils.h" #include "LogUtils.h" DECLARE_LOG_OBJECT() namespace pulsar { -ExecutorService::ExecutorService() - : io_service_(new boost::asio::io_service()), - work_(new BackgroundWork(*io_service_)), - worker_(std::bind(&ExecutorService::startWorker, this, io_service_)) {} +ExecutorService::ExecutorService() {} -ExecutorService::~ExecutorService() { close(); } +ExecutorService::~ExecutorService() { close(0); } -void ExecutorService::startWorker(std::shared_ptr io_service) { io_service_->run(); } +void ExecutorService::start() { + auto self = shared_from_this(); + std::thread t{[self] { + if (self->isClosed()) { + return; + } + LOG_INFO("Run io_service in a single thread"); + boost::system::error_code ec; + self->getIOService().run(ec); + if (ec) { + LOG_ERROR("Failed to run io_service: " << ec.message()); + } else { + LOG_INFO("Event loop of ExecutorService exits successfully"); + } + self->ioServiceDone_ = true; + self->cond_.notify_all(); + }}; + t.detach(); +} + +ExecutorServicePtr 
ExecutorService::create() { + // make_shared cannot access the private constructor, so we need to expose the private constructor via a + // derived class. + struct ExecutorServiceImpl : public ExecutorService {}; + + auto executor = std::make_shared(); + executor->start(); + return std::static_pointer_cast(executor); +} /* * factory method of boost::asio::ip::tcp::socket associated with io_service_ instance * @ returns shared_ptr to this socket */ -SocketPtr ExecutorService::createSocket() { - return SocketPtr(new boost::asio::ip::tcp::socket(*io_service_)); -} +SocketPtr ExecutorService::createSocket() { return SocketPtr(new boost::asio::ip::tcp::socket(io_service_)); } TlsSocketPtr ExecutorService::createTlsSocket(SocketPtr &socket, boost::asio::ssl::context &ctx) { return std::shared_ptr >( @@ -54,34 +78,33 @@ TlsSocketPtr ExecutorService::createTlsSocket(SocketPtr &socket, boost::asio::ss * @returns shraed_ptr to resolver object */ TcpResolverPtr ExecutorService::createTcpResolver() { - return TcpResolverPtr(new boost::asio::ip::tcp::resolver(*io_service_)); + return TcpResolverPtr(new boost::asio::ip::tcp::resolver(io_service_)); } DeadlineTimerPtr ExecutorService::createDeadlineTimer() { - return DeadlineTimerPtr(new boost::asio::deadline_timer(*io_service_)); + return DeadlineTimerPtr(new boost::asio::deadline_timer(io_service_)); } -void ExecutorService::close() { +void ExecutorService::close(long timeoutMs) { bool expectedState = false; if (!closed_.compare_exchange_strong(expectedState, true)) { return; } + if (timeoutMs == 0) { // non-blocking + io_service_.stop(); + return; + } - io_service_->stop(); - work_.reset(); - // Detach the worker thread instead of join to avoid potential deadlock - if (worker_.joinable()) { - try { - worker_.detach(); - } catch (const std::system_error &e) { - // This condition will happen if we're forking the process, therefore the thread was not ported to - // the child side of the fork and the detach would be failing. 
- LOG_DEBUG("Failed to detach thread: " << e.what()); - } + std::unique_lock lock{mutex_}; + io_service_.stop(); + if (timeoutMs > 0) { + cond_.wait_for(lock, std::chrono::milliseconds(timeoutMs), [this] { return ioServiceDone_.load(); }); + } else { // < 0 + cond_.wait(lock, [this] { return ioServiceDone_.load(); }); } } -void ExecutorService::postWork(std::function task) { io_service_->post(task); } +void ExecutorService::postWork(std::function task) { io_service_.post(task); } ///////////////////// @@ -93,20 +116,23 @@ ExecutorServicePtr ExecutorServiceProvider::get() { int idx = executorIdx_++ % executors_.size(); if (!executors_[idx]) { - executors_[idx] = std::make_shared(); + executors_[idx] = ExecutorService::create(); } return executors_[idx]; } -void ExecutorServiceProvider::close() { +void ExecutorServiceProvider::close(long timeoutMs) { Lock lock(mutex_); - for (ExecutorList::iterator it = executors_.begin(); it != executors_.end(); ++it) { - if (*it != NULL) { - (*it)->close(); + TimeoutProcessor timeoutProcessor{timeoutMs}; + for (auto &&executor : executors_) { + timeoutProcessor.tik(); + if (executor) { + executor->close(timeoutProcessor.getLeftTimeout()); } - it->reset(); + timeoutProcessor.tok(); + executor.reset(); } } } // namespace pulsar diff --git a/pulsar-client-cpp/lib/ExecutorService.h b/pulsar-client-cpp/lib/ExecutorService.h index 6746936190566..e4cbb3ce62ef0 100644 --- a/pulsar-client-cpp/lib/ExecutorService.h +++ b/pulsar-client-cpp/lib/ExecutorService.h @@ -20,12 +20,13 @@ #define _PULSAR_EXECUTOR_SERVICE_HEADER_ #include +#include +#include #include #include #include #include #include -#include #include #include @@ -34,51 +35,52 @@ typedef std::shared_ptr SocketPtr; typedef std::shared_ptr > TlsSocketPtr; typedef std::shared_ptr TcpResolverPtr; typedef std::shared_ptr DeadlineTimerPtr; -class PULSAR_PUBLIC ExecutorService : private boost::noncopyable { - friend class ClientConnection; - +class PULSAR_PUBLIC ExecutorService : public 
std::enable_shared_from_this { public: - ExecutorService(); + using IOService = boost::asio::io_service; + using SharedPtr = std::shared_ptr; + + static SharedPtr create(); ~ExecutorService(); + ExecutorService(const ExecutorService &) = delete; + ExecutorService &operator=(const ExecutorService &) = delete; + SocketPtr createSocket(); - TlsSocketPtr createTlsSocket(SocketPtr &socket, boost::asio::ssl::context &ctx); + static TlsSocketPtr createTlsSocket(SocketPtr &socket, boost::asio::ssl::context &ctx); TcpResolverPtr createTcpResolver(); DeadlineTimerPtr createDeadlineTimer(); void postWork(std::function task); - void close(); - boost::asio::io_service &getIOService() { return *io_service_; } + // See TimeoutProcessor for the semantics of the parameter. + void close(long timeoutMs = 3000); - private: - /* - * only called once and within lock so no need to worry about thread-safety - */ - void startWorker(std::shared_ptr io_service); + IOService &getIOService() { return io_service_; } + bool isClosed() const noexcept { return closed_; } + private: /* * io_service is our interface to os, io object schedule async ops on this object */ - std::shared_ptr io_service_; + IOService io_service_; /* * work will not let io_service.run() return even after it has finished work * it will keep it running in the background so we don't have to take care of it */ - typedef boost::asio::io_service::work BackgroundWork; - std::unique_ptr work_; - - /* - * worker thread which runs until work object is destroyed, it's running io_service::run in - * background invoking async handlers as they are finished and result is available from - * io_service - */ - std::thread worker_; + IOService::work work_{io_service_}; std::atomic_bool closed_{false}; + std::mutex mutex_; + std::condition_variable cond_; + std::atomic_bool ioServiceDone_{false}; + + ExecutorService(); + + void start(); }; -typedef std::shared_ptr ExecutorServicePtr; +using ExecutorServicePtr = ExecutorService::SharedPtr; 
class PULSAR_PUBLIC ExecutorServiceProvider { public: @@ -86,7 +88,8 @@ class PULSAR_PUBLIC ExecutorServiceProvider { ExecutorServicePtr get(); - void close(); + // See TimeoutProcessor for the semantics of the parameter. + void close(long timeoutMs = 3000); private: typedef std::vector ExecutorList; diff --git a/pulsar-client-cpp/lib/Future.h b/pulsar-client-cpp/lib/Future.h index cafb63f11f74c..b695e5e8c2a95 100644 --- a/pulsar-client-cpp/lib/Future.h +++ b/pulsar-client-cpp/lib/Future.h @@ -90,7 +90,8 @@ class Promise { public: Promise() : state_(std::make_shared >()) {} - bool setValue(const Type& value) { + bool setValue(const Type& value) const { + static Result DEFAULT_RESULT; InternalState* state = state_.get(); Lock lock(state->mutex); @@ -99,21 +100,24 @@ class Promise { } state->value = value; - state->result = Result(); + state->result = DEFAULT_RESULT; state->complete = true; - typename std::list::iterator it; - for (it = state->listeners.begin(); it != state->listeners.end(); ++it) { - ListenerCallback& callback = *it; - callback(state->result, state->value); + decltype(state->listeners) listeners; + listeners.swap(state->listeners); + + lock.unlock(); + + for (auto& callback : listeners) { + callback(DEFAULT_RESULT, value); } - state->listeners.clear(); state->condition.notify_all(); return true; } - bool setFailed(Result result) { + bool setFailed(Result result) const { + static Type DEFAULT_VALUE; InternalState* state = state_.get(); Lock lock(state->mutex); @@ -124,13 +128,15 @@ class Promise { state->result = result; state->complete = true; - typename std::list::iterator it; - for (it = state->listeners.begin(); it != state->listeners.end(); ++it) { - ListenerCallback& callback = *it; - callback(state->result, state->value); + decltype(state->listeners) listeners; + listeners.swap(state->listeners); + + lock.unlock(); + + for (auto& callback : listeners) { + callback(result, DEFAULT_VALUE); } - state->listeners.clear(); 
state->condition.notify_all(); return true; } diff --git a/pulsar-client-cpp/lib/GetLastMessageIdResponse.h b/pulsar-client-cpp/lib/GetLastMessageIdResponse.h new file mode 100644 index 0000000000000..0acb78394e115 --- /dev/null +++ b/pulsar-client-cpp/lib/GetLastMessageIdResponse.h @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +#pragma once + +#include +#include + +namespace pulsar { + +class GetLastMessageIdResponse { + friend std::ostream& operator<<(std::ostream& os, const GetLastMessageIdResponse& response) { + os << "lastMessageId: " << response.lastMessageId_; + if (response.hasMarkDeletePosition_) { + os << ", markDeletePosition: " << response.markDeletePosition_; + } + return os; + } + + public: + GetLastMessageIdResponse() = default; + + GetLastMessageIdResponse(const MessageId& lastMessageId) + : lastMessageId_(lastMessageId), hasMarkDeletePosition_{false} {} + + GetLastMessageIdResponse(const MessageId& lastMessageId, const MessageId& markDeletePosition) + : lastMessageId_(lastMessageId), + markDeletePosition_(markDeletePosition), + hasMarkDeletePosition_(true) {} + + const MessageId& getLastMessageId() const noexcept { return lastMessageId_; } + const MessageId& getMarkDeletePosition() const noexcept { return markDeletePosition_; } + bool hasMarkDeletePosition() const noexcept { return hasMarkDeletePosition_; } + + private: + MessageId lastMessageId_; + MessageId markDeletePosition_; + bool hasMarkDeletePosition_; +}; + +} // namespace pulsar diff --git a/pulsar-client-cpp/lib/HTTPLookupService.cc b/pulsar-client-cpp/lib/HTTPLookupService.cc index a54a4c1f4f770..340f67c050e53 100644 --- a/pulsar-client-cpp/lib/HTTPLookupService.cc +++ b/pulsar-client-cpp/lib/HTTPLookupService.cc @@ -38,6 +38,8 @@ const static int MAX_HTTP_REDIRECTS = 20; const static std::string PARTITION_METHOD_NAME = "partitions"; const static int NUMBER_OF_LOOKUP_THREADS = 1; +static inline bool needRedirection(long code) { return (code == 307 || code == 302 || code == 301); } + HTTPLookupService::CurlInitializer::CurlInitializer() { // Once per application - https://curl.haxx.se/mail/lib-2015-11/0052.html curl_global_init(CURL_GLOBAL_ALL); @@ -148,132 +150,145 @@ void HTTPLookupService::handleNamespaceTopicsHTTPRequest(NamespaceTopicsPromise } } -Result HTTPLookupService::sendHTTPRequest(const 
std::string completeUrl, std::string &responseData) { - CURL *handle; - CURLcode res; - std::string version = std::string("Pulsar-CPP-v") + _PULSAR_VERSION_; - handle = curl_easy_init(); - - if (!handle) { - LOG_ERROR("Unable to curl_easy_init for url " << completeUrl); - // No curl_easy_cleanup required since handle not initialized - return ResultLookupError; - } - // set URL - curl_easy_setopt(handle, CURLOPT_URL, completeUrl.c_str()); - - // Write callback - curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, curlWriteCallback); - curl_easy_setopt(handle, CURLOPT_WRITEDATA, &responseData); - - // New connection is made for each call - curl_easy_setopt(handle, CURLOPT_FRESH_CONNECT, 1L); - curl_easy_setopt(handle, CURLOPT_FORBID_REUSE, 1L); +Result HTTPLookupService::sendHTTPRequest(std::string completeUrl, std::string &responseData) { + uint16_t reqCount = 0; + Result retResult = ResultOk; + while (++reqCount <= MAX_HTTP_REDIRECTS) { + CURL *handle; + CURLcode res; + std::string version = std::string("Pulsar-CPP-v") + _PULSAR_VERSION_INTERNAL_; + handle = curl_easy_init(); + + if (!handle) { + LOG_ERROR("Unable to curl_easy_init for url " << completeUrl); + // No curl_easy_cleanup required since handle not initialized + return ResultLookupError; + } + // set URL + curl_easy_setopt(handle, CURLOPT_URL, completeUrl.c_str()); - // Skipping signal handling - results in timeouts not honored during the DNS lookup - curl_easy_setopt(handle, CURLOPT_NOSIGNAL, 1L); + // Write callback + curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, curlWriteCallback); + curl_easy_setopt(handle, CURLOPT_WRITEDATA, &responseData); - // Timer - curl_easy_setopt(handle, CURLOPT_TIMEOUT, lookupTimeoutInSeconds_); + // New connection is made for each call + curl_easy_setopt(handle, CURLOPT_FRESH_CONNECT, 1L); + curl_easy_setopt(handle, CURLOPT_FORBID_REUSE, 1L); - // Set User Agent - curl_easy_setopt(handle, CURLOPT_USERAGENT, version.c_str()); + // Skipping signal handling - results in timeouts 
not honored during the DNS lookup + curl_easy_setopt(handle, CURLOPT_NOSIGNAL, 1L); - // Redirects - curl_easy_setopt(handle, CURLOPT_FOLLOWLOCATION, 1L); - curl_easy_setopt(handle, CURLOPT_MAXREDIRS, MAX_HTTP_REDIRECTS); + // Timer + curl_easy_setopt(handle, CURLOPT_TIMEOUT, lookupTimeoutInSeconds_); - // Fail if HTTP return code >=400 - curl_easy_setopt(handle, CURLOPT_FAILONERROR, 1L); + // Set User Agent + curl_easy_setopt(handle, CURLOPT_USERAGENT, version.c_str()); - // Authorization data - AuthenticationDataPtr authDataContent; - Result authResult = authenticationPtr_->getAuthData(authDataContent); - if (authResult != ResultOk) { - LOG_ERROR("Failed to getAuthData: " << authResult); - curl_easy_cleanup(handle); - return authResult; - } - struct curl_slist *list = NULL; - if (authDataContent->hasDataForHttp()) { - list = curl_slist_append(list, authDataContent->getHttpHeaders().c_str()); - } - curl_easy_setopt(handle, CURLOPT_HTTPHEADER, list); + // Fail if HTTP return code >=400 + curl_easy_setopt(handle, CURLOPT_FAILONERROR, 1L); - // TLS - if (isUseTls_) { - if (curl_easy_setopt(handle, CURLOPT_SSLENGINE, NULL) != CURLE_OK) { - LOG_ERROR("Unable to load SSL engine for url " << completeUrl); + // Authorization data + AuthenticationDataPtr authDataContent; + Result authResult = authenticationPtr_->getAuthData(authDataContent); + if (authResult != ResultOk) { + LOG_ERROR("Failed to getAuthData: " << authResult); curl_easy_cleanup(handle); - return ResultConnectError; + return authResult; } - if (curl_easy_setopt(handle, CURLOPT_SSLENGINE_DEFAULT, 1L) != CURLE_OK) { - LOG_ERROR("Unable to load SSL engine as default, for url " << completeUrl); - curl_easy_cleanup(handle); - return ResultConnectError; + struct curl_slist *list = NULL; + if (authDataContent->hasDataForHttp()) { + list = curl_slist_append(list, authDataContent->getHttpHeaders().c_str()); } - curl_easy_setopt(handle, CURLOPT_SSLCERTTYPE, "PEM"); + curl_easy_setopt(handle, CURLOPT_HTTPHEADER, list); 
+ + // TLS + if (isUseTls_) { + if (curl_easy_setopt(handle, CURLOPT_SSLENGINE, NULL) != CURLE_OK) { + LOG_ERROR("Unable to load SSL engine for url " << completeUrl); + curl_easy_cleanup(handle); + return ResultConnectError; + } + if (curl_easy_setopt(handle, CURLOPT_SSLENGINE_DEFAULT, 1L) != CURLE_OK) { + LOG_ERROR("Unable to load SSL engine as default, for url " << completeUrl); + curl_easy_cleanup(handle); + return ResultConnectError; + } + curl_easy_setopt(handle, CURLOPT_SSLCERTTYPE, "PEM"); - if (tlsAllowInsecure_) { - curl_easy_setopt(handle, CURLOPT_SSL_VERIFYPEER, 0L); - } else { - curl_easy_setopt(handle, CURLOPT_SSL_VERIFYPEER, 1L); - } + if (tlsAllowInsecure_) { + curl_easy_setopt(handle, CURLOPT_SSL_VERIFYPEER, 0L); + } else { + curl_easy_setopt(handle, CURLOPT_SSL_VERIFYPEER, 1L); + } - if (!tlsTrustCertsFilePath_.empty()) { - curl_easy_setopt(handle, CURLOPT_CAINFO, tlsTrustCertsFilePath_.c_str()); - } + if (!tlsTrustCertsFilePath_.empty()) { + curl_easy_setopt(handle, CURLOPT_CAINFO, tlsTrustCertsFilePath_.c_str()); + } - curl_easy_setopt(handle, CURLOPT_SSL_VERIFYHOST, tlsValidateHostname_ ? 1L : 0L); + curl_easy_setopt(handle, CURLOPT_SSL_VERIFYHOST, tlsValidateHostname_ ? 
1L : 0L); - if (authDataContent->hasDataForTls()) { - curl_easy_setopt(handle, CURLOPT_SSLCERT, authDataContent->getTlsCertificates().c_str()); - curl_easy_setopt(handle, CURLOPT_SSLKEY, authDataContent->getTlsPrivateKey().c_str()); + if (authDataContent->hasDataForTls()) { + curl_easy_setopt(handle, CURLOPT_SSLCERT, authDataContent->getTlsCertificates().c_str()); + curl_easy_setopt(handle, CURLOPT_SSLKEY, authDataContent->getTlsPrivateKey().c_str()); + } } - } - - LOG_INFO("Curl Lookup Request sent for " << completeUrl); - - // Make get call to server - res = curl_easy_perform(handle); - - // Free header list - curl_slist_free_all(list); - Result retResult = ResultOk; - - switch (res) { - case CURLE_OK: - long response_code; - curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &response_code); - LOG_INFO("Response received for url " << completeUrl << " code " << response_code); - if (response_code == 200) { - retResult = ResultOk; - } else { + LOG_INFO("Curl [" << reqCount << "] Lookup Request sent for " << completeUrl); + + // Make get call to server + res = curl_easy_perform(handle); + + long response_code = -1; + curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &response_code); + LOG_INFO("Response received for url " << completeUrl << " response_code " << response_code + << " curl res " << res); + + // Free header list + curl_slist_free_all(list); + + switch (res) { + case CURLE_OK: + long response_code; + curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &response_code); + LOG_INFO("Response received for url " << completeUrl << " code " << response_code); + if (response_code == 200) { + retResult = ResultOk; + } else if (needRedirection(response_code)) { + char *url = NULL; + curl_easy_getinfo(handle, CURLINFO_REDIRECT_URL, &url); + LOG_INFO("Response from url " << completeUrl << " to new url " << url); + completeUrl = url; + retResult = ResultLookupError; + } else { + retResult = ResultLookupError; + } + break; + case CURLE_COULDNT_CONNECT: + case 
CURLE_COULDNT_RESOLVE_PROXY: + case CURLE_COULDNT_RESOLVE_HOST: + case CURLE_HTTP_RETURNED_ERROR: + LOG_ERROR("Response failed for url " << completeUrl << ". Error Code " << res); + retResult = ResultConnectError; + break; + case CURLE_READ_ERROR: + LOG_ERROR("Response failed for url " << completeUrl << ". Error Code " << res); + retResult = ResultReadError; + break; + case CURLE_OPERATION_TIMEDOUT: + LOG_ERROR("Response failed for url " << completeUrl << ". Error Code " << res); + retResult = ResultTimeout; + break; + default: + LOG_ERROR("Response failed for url " << completeUrl << ". Error Code " << res); retResult = ResultLookupError; - } - break; - case CURLE_COULDNT_CONNECT: - case CURLE_COULDNT_RESOLVE_PROXY: - case CURLE_COULDNT_RESOLVE_HOST: - case CURLE_HTTP_RETURNED_ERROR: - LOG_ERROR("Response failed for url " << completeUrl << ". Error Code " << res); - retResult = ResultConnectError; - break; - case CURLE_READ_ERROR: - LOG_ERROR("Response failed for url " << completeUrl << ". Error Code " << res); - retResult = ResultReadError; - break; - case CURLE_OPERATION_TIMEDOUT: - LOG_ERROR("Response failed for url " << completeUrl << ". Error Code " << res); - retResult = ResultTimeout; - break; - default: - LOG_ERROR("Response failed for url " << completeUrl << ". 
Error Code " << res); - retResult = ResultLookupError; + break; + } + curl_easy_cleanup(handle); + if (!needRedirection(response_code)) { break; + } } - curl_easy_cleanup(handle); + return retResult; } diff --git a/pulsar-client-cpp/lib/HTTPLookupService.h b/pulsar-client-cpp/lib/HTTPLookupService.h index 166a14a03b957..3d0d39ee90a97 100644 --- a/pulsar-client-cpp/lib/HTTPLookupService.h +++ b/pulsar-client-cpp/lib/HTTPLookupService.h @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace pulsar { class HTTPLookupService : public LookupService, public std::enable_shared_from_this { @@ -57,7 +57,7 @@ class HTTPLookupService : public LookupService, public std::enable_shared_from_t void handleLookupHTTPRequest(LookupPromise, const std::string, RequestType); void handleNamespaceTopicsHTTPRequest(NamespaceTopicsPromise promise, const std::string completeUrl); - Result sendHTTPRequest(const std::string completeUrl, std::string& responseData); + Result sendHTTPRequest(std::string completeUrl, std::string& responseData); public: HTTPLookupService(const std::string&, const ClientConfiguration&, const AuthenticationPtr&); diff --git a/pulsar-client-cpp/lib/HandlerBase.cc b/pulsar-client-cpp/lib/HandlerBase.cc index d7025ad004b15..5d2244f7552df 100644 --- a/pulsar-client-cpp/lib/HandlerBase.cc +++ b/pulsar-client-cpp/lib/HandlerBase.cc @@ -43,12 +43,9 @@ HandlerBase::HandlerBase(const ClientImplPtr& client, const std::string& topic, HandlerBase::~HandlerBase() { timer_->cancel(); } void HandlerBase::start() { - Lock lock(mutex_); // guard against concurrent state changes such as closing - if (state_ == NotStarted) { - state_ = Pending; - lock.unlock(); - + State state = NotStarted; + if (state_.compare_exchange_strong(state, Pending)) { grabCnx(); } } @@ -97,7 +94,6 @@ void HandlerBase::handleDisconnection(Result result, ClientConnectionWeakPtr con return; } - Lock lock(handler->mutex_); State state = handler->state_; ClientConnectionPtr currentConnection 
= handler->connection_.lock(); @@ -135,7 +131,8 @@ bool HandlerBase::isRetriableError(Result result) { } void HandlerBase::scheduleReconnection(HandlerBasePtr handler) { - if (handler->state_ == Pending || handler->state_ == Ready) { + const auto state = handler->state_.load(); + if (state == Pending || state == Ready) { TimeDuration delay = handler->backoff_.next(); LOG_INFO(handler->getName() << "Schedule reconnection in " << (delay.total_milliseconds() / 1000.0) diff --git a/pulsar-client-cpp/lib/HandlerBase.h b/pulsar-client-cpp/lib/HandlerBase.h index eeb8ebe1c5e8d..1184746da21ba 100644 --- a/pulsar-client-cpp/lib/HandlerBase.h +++ b/pulsar-client-cpp/lib/HandlerBase.h @@ -105,7 +105,7 @@ class HandlerBase { Failed }; - State state_; + std::atomic state_; Backoff backoff_; uint64_t epoch_; diff --git a/pulsar-client-cpp/lib/Message.cc b/pulsar-client-cpp/lib/Message.cc index 76e408ffef428..b928945cfae21 100644 --- a/pulsar-client-cpp/lib/Message.cc +++ b/pulsar-client-cpp/lib/Message.cc @@ -79,12 +79,38 @@ Message::Message(const MessageId& messageID, proto::MessageMetadata& metadata, S impl_->metadata.mutable_properties()->CopyFrom(singleMetadata.properties()); impl_->topicName_ = &topicName; + impl_->metadata.clear_properties(); + if (singleMetadata.properties_size() > 0) { + impl_->metadata.mutable_properties()->Reserve(singleMetadata.properties_size()); + for (int i = 0; i < singleMetadata.properties_size(); i++) { + auto keyValue = proto::KeyValue().New(); + *keyValue = singleMetadata.properties(i); + impl_->metadata.mutable_properties()->AddAllocated(keyValue); + } + } + if (singleMetadata.has_partition_key()) { impl_->metadata.set_partition_key(singleMetadata.partition_key()); + } else { + impl_->metadata.clear_partition_key(); + } + + if (singleMetadata.has_ordering_key()) { + impl_->metadata.set_ordering_key(singleMetadata.ordering_key()); + } else { + impl_->metadata.clear_ordering_key(); } if (singleMetadata.has_event_time()) { 
impl_->metadata.set_event_time(singleMetadata.event_time()); + } else { + impl_->metadata.clear_event_time(); + } + + if (singleMetadata.has_sequence_id()) { + impl_->metadata.set_sequence_id(singleMetadata.sequence_id()); + } else { + impl_->metadata.clear_sequence_id(); } } diff --git a/pulsar-client-cpp/tests/ReaderTest.h b/pulsar-client-cpp/lib/MessageIdUtil.h similarity index 53% rename from pulsar-client-cpp/tests/ReaderTest.h rename to pulsar-client-cpp/lib/MessageIdUtil.h index fd0387f1d0662..d6f80a10ea015 100644 --- a/pulsar-client-cpp/tests/ReaderTest.h +++ b/pulsar-client-cpp/lib/MessageIdUtil.h @@ -16,17 +16,29 @@ * specific language governing permissions and limitations * under the License. */ -#include "lib/ReaderImpl.h" -#include - -using std::string; +#include +#include "PulsarApi.pb.h" namespace pulsar { -class ReaderTest { - public: - static ConsumerImplPtr getConsumer(const Reader& reader) { return reader.impl_->getConsumer(); } - static ReaderImplWeakPtr getReaderImplWeakPtr(const Reader& reader) { - return reader.impl_->getReaderImplWeakPtr(); + +inline MessageId toMessageId(const proto::MessageIdData& messageIdData) { + return MessageId{messageIdData.partition(), static_cast(messageIdData.ledgerid()), + static_cast(messageIdData.entryid()), messageIdData.batch_index()}; +} + +namespace internal { +template +static int compare(T lhs, T rhs) { + return (lhs < rhs) ? -1 : ((lhs == rhs) ? 
0 : 1); +} +} // namespace internal + +inline int compareLedgerAndEntryId(const MessageId& lhs, const MessageId& rhs) { + auto result = internal::compare(lhs.ledgerId(), rhs.ledgerId()); + if (result != 0) { + return result; } -}; + return internal::compare(lhs.entryId(), rhs.entryId()); +} + } // namespace pulsar diff --git a/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.cc b/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.cc index 4e31e64d5ee56..5fe9446b18668 100644 --- a/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.cc +++ b/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.cc @@ -17,6 +17,7 @@ * under the License. */ #include "MultiTopicsConsumerImpl.h" +#include "MultiResultCallback.h" DECLARE_LOG_OBJECT() @@ -25,7 +26,7 @@ using namespace pulsar; MultiTopicsConsumerImpl::MultiTopicsConsumerImpl(ClientImplPtr client, const std::vector& topics, const std::string& subscriptionName, TopicNamePtr topicName, const ConsumerConfiguration& conf, - const LookupServicePtr lookupServicePtr) + LookupServicePtr lookupServicePtr) : client_(client), subscriptionName_(subscriptionName), topic_(topicName ? 
topicName->toString() : "EmptyTopics"), @@ -52,11 +53,18 @@ MultiTopicsConsumerImpl::MultiTopicsConsumerImpl(ClientImplPtr client, const std } else { unAckedMessageTrackerPtr_.reset(new UnAckedMessageTrackerDisabled()); } + auto partitionsUpdateInterval = static_cast(client_->conf().getPartitionsUpdateInterval()); + if (partitionsUpdateInterval > 0) { + partitionsUpdateTimer_ = listenerExecutor_->createDeadlineTimer(); + partitionsUpdateInterval_ = boost::posix_time::seconds(partitionsUpdateInterval); + lookupServicePtr_ = client_->getLookup(); + } } void MultiTopicsConsumerImpl::start() { if (topics_.empty()) { - if (compareAndSetState(Pending, Ready)) { + MultiTopicsConsumerState state = Pending; + if (state_.compare_exchange_strong(state, Ready)) { LOG_DEBUG("No topics passed in when create MultiTopicsConsumer."); multiTopicsConsumerCreatedPromise_.setValue(shared_from_this()); return; @@ -81,27 +89,28 @@ void MultiTopicsConsumerImpl::start() { void MultiTopicsConsumerImpl::handleOneTopicSubscribed(Result result, Consumer consumer, const std::string& topic, std::shared_ptr> topicsNeedCreate) { - (*topicsNeedCreate)--; - if (result != ResultOk) { - setState(Failed); + state_ = Failed; + // Use the first failed result + auto expectedResult = ResultOk; + failedResult.compare_exchange_strong(expectedResult, result); LOG_ERROR("Failed when subscribed to topic " << topic << " in TopicsConsumer. 
Error - " << result); + } else { + LOG_DEBUG("Subscribed to topic " << topic << " in TopicsConsumer "); } - LOG_DEBUG("Subscribed to topic " << topic << " in TopicsConsumer "); - - if (topicsNeedCreate->load() == 0) { - if (compareAndSetState(Pending, Ready)) { + if (--(*topicsNeedCreate) == 0) { + MultiTopicsConsumerState state = Pending; + if (state_.compare_exchange_strong(state, Ready)) { LOG_INFO("Successfully Subscribed to Topics"); multiTopicsConsumerCreatedPromise_.setValue(shared_from_this()); } else { LOG_ERROR("Unable to create Consumer - " << consumerStr_ << " Error - " << result); // unsubscribed all of the successfully subscribed partitioned consumers - closeAsync(nullptr); - multiTopicsConsumerCreatedPromise_.setFailed(result); - return; + // It's safe to capture only this here, because the callback can be called only when this is valid + closeAsync( + [this](Result result) { multiTopicsConsumerCreatedPromise_.setFailed(failedResult.load()); }); } - return; } } @@ -115,32 +124,40 @@ Future MultiTopicsConsumerImpl::subscribeOneTopicAsync(const s return topicPromise->getFuture(); } - if (state_ == Closed || state_ == Closing) { + const auto state = state_.load(); + if (state == Closed || state == Closing) { LOG_ERROR("MultiTopicsConsumer already closed when subscribe."); topicPromise->setFailed(ResultAlreadyClosed); return topicPromise->getFuture(); } // subscribe for each partition, when all partitions completed, complete promise - lookupServicePtr_->getPartitionMetadataAsync(topicName).addListener(std::bind( - &MultiTopicsConsumerImpl::subscribeTopicPartitions, shared_from_this(), std::placeholders::_1, - std::placeholders::_2, topicName, subscriptionName_, conf_, topicPromise)); + Lock lock(mutex_); + auto entry = topicsPartitions_.find(topic); + if (entry == topicsPartitions_.end()) { + lock.unlock(); + lookupServicePtr_->getPartitionMetadataAsync(topicName).addListener( + [this, topicName, topicPromise](Result result, const LookupDataResultPtr& 
lookupDataResult) { + if (result != ResultOk) { + LOG_ERROR("Error Checking/Getting Partition Metadata while MultiTopics Subscribing- " + << consumerStr_ << " result: " << result) + topicPromise->setFailed(result); + return; + } + subscribeTopicPartitions(lookupDataResult->getPartitions(), topicName, subscriptionName_, + topicPromise); + }); + } else { + auto numPartitions = entry->second; + lock.unlock(); + subscribeTopicPartitions(numPartitions, topicName, subscriptionName_, topicPromise); + } return topicPromise->getFuture(); } -void MultiTopicsConsumerImpl::subscribeTopicPartitions(const Result result, - const LookupDataResultPtr partitionMetadata, - TopicNamePtr topicName, +void MultiTopicsConsumerImpl::subscribeTopicPartitions(int numPartitions, TopicNamePtr topicName, const std::string& consumerName, - ConsumerConfiguration conf, ConsumerSubResultPromisePtr topicSubResultPromise) { - if (result != ResultOk) { - LOG_ERROR("Error Checking/Getting Partition Metadata while MultiTopics Subscribing- " - << consumerStr_ << " result: " << result) - topicSubResultPromise->setFailed(result); - return; - } - std::shared_ptr consumer; ConsumerConfiguration config = conf_.clone(); ExecutorServicePtr internalListenerExecutor = client_->getPartitionListenerExecutorProvider()->get(); @@ -148,7 +165,6 @@ void MultiTopicsConsumerImpl::subscribeTopicPartitions(const Result result, config.setMessageListener(std::bind(&MultiTopicsConsumerImpl::messageReceived, shared_from_this(), std::placeholders::_1, std::placeholders::_2)); - int numPartitions = partitionMetadata->getPartitions(); int partitions = numPartitions == 0 ? 
1 : numPartitions; // Apply total limit of receiver queue size across partitions @@ -157,7 +173,7 @@ void MultiTopicsConsumerImpl::subscribeTopicPartitions(const Result result, (int)(conf_.getMaxTotalReceiverQueueSizeAcrossPartitions() / partitions))); Lock lock(mutex_); - topicsPartitions_.insert(std::make_pair(topicName->toString(), partitions)); + topicsPartitions_[topicName->toString()] = partitions; lock.unlock(); numberTopicPartitions_->fetch_add(partitions); @@ -171,7 +187,7 @@ void MultiTopicsConsumerImpl::subscribeTopicPartitions(const Result result, consumer->getConsumerCreatedFuture().addListener(std::bind( &MultiTopicsConsumerImpl::handleSingleConsumerCreated, shared_from_this(), std::placeholders::_1, std::placeholders::_2, partitionsNeedCreate, topicSubResultPromise)); - consumers_.insert(std::make_pair(topicName->toString(), consumer)); + consumers_.emplace(topicName->toString(), consumer); LOG_DEBUG("Creating Consumer for - " << topicName << " - " << consumerStr_); consumer->start(); @@ -184,7 +200,7 @@ void MultiTopicsConsumerImpl::subscribeTopicPartitions(const Result result, &MultiTopicsConsumerImpl::handleSingleConsumerCreated, shared_from_this(), std::placeholders::_1, std::placeholders::_2, partitionsNeedCreate, topicSubResultPromise)); consumer->setPartitionIndex(i); - consumers_.insert(std::make_pair(topicPartitionName, consumer)); + consumers_.emplace(topicPartitionName, consumer); LOG_DEBUG("Creating Consumer for - " << topicPartitionName << " - " << consumerStr_); consumer->start(); } @@ -211,10 +227,13 @@ void MultiTopicsConsumerImpl::handleSingleConsumerCreated( return; } - LOG_DEBUG("Successfully Subscribed to a single partition of topic in TopicsConsumer. " - << "Partitions need to create - " << previous - 1); + LOG_INFO("Successfully Subscribed to a single partition of topic in TopicsConsumer. 
" + << "Partitions need to create : " << previous - 1); if (partitionsNeedCreate->load() == 0) { + if (partitionsUpdateTimer_) { + runPartitionUpdateTask(); + } topicSubResultPromise->setValue(Consumer(shared_from_this())); } } @@ -222,30 +241,27 @@ void MultiTopicsConsumerImpl::handleSingleConsumerCreated( void MultiTopicsConsumerImpl::unsubscribeAsync(ResultCallback callback) { LOG_INFO("[ Topics Consumer " << topic_ << "," << subscriptionName_ << "] Unsubscribing"); - Lock lock(mutex_); - if (state_ == Closing || state_ == Closed) { + const auto state = state_.load(); + if (state == Closing || state == Closed) { LOG_INFO(consumerStr_ << " already closed"); - lock.unlock(); callback(ResultAlreadyClosed); return; } state_ = Closing; - lock.unlock(); - if (consumers_.empty()) { + std::shared_ptr> consumerUnsubed = std::make_shared>(0); + auto self = shared_from_this(); + int numConsumers = 0; + consumers_.forEachValue( + [&numConsumers, &consumerUnsubed, &self, callback](const ConsumerImplPtr& consumer) { + numConsumers++; + consumer->unsubscribeAsync([self, consumerUnsubed, callback](Result result) { + self->handleUnsubscribedAsync(result, consumerUnsubed, callback); + }); + }); + if (numConsumers == 0) { // No need to unsubscribe, since the list matching the regex was empty callback(ResultOk); - return; - } - - std::shared_ptr> consumerUnsubed = std::make_shared>(0); - - for (ConsumerMap::const_iterator consumer = consumers_.begin(); consumer != consumers_.end(); - consumer++) { - (consumer->second) - ->unsubscribeAsync(std::bind(&MultiTopicsConsumerImpl::handleUnsubscribedAsync, - shared_from_this(), std::placeholders::_1, consumerUnsubed, - callback)); } } @@ -255,7 +271,7 @@ void MultiTopicsConsumerImpl::handleUnsubscribedAsync(Result result, (*consumerUnsubed)++; if (result != ResultOk) { - setState(Failed); + state_ = Failed; LOG_ERROR("Error Closing one of the consumers in TopicsConsumer, result: " << result << " subscription - " << subscriptionName_); } @@ 
-267,22 +283,27 @@ void MultiTopicsConsumerImpl::handleUnsubscribedAsync(Result result, unAckedMessageTrackerPtr_->clear(); Result result1 = (state_ != Failed) ? ResultOk : ResultUnknownError; - setState(Closed); + state_ = Closed; callback(result1); return; } } void MultiTopicsConsumerImpl::unsubscribeOneTopicAsync(const std::string& topic, ResultCallback callback) { + Lock lock(mutex_); std::map::iterator it = topicsPartitions_.find(topic); if (it == topicsPartitions_.end()) { + lock.unlock(); LOG_ERROR("TopicsConsumer does not subscribe topic : " << topic << " subscription - " << subscriptionName_); callback(ResultTopicNotFound); return; } + int numberPartitions = it->second; + lock.unlock(); - if (state_ == Closing || state_ == Closed) { + const auto state = state_.load(); + if (state == Closing || state == Closed) { LOG_ERROR("TopicsConsumer already closed when unsubscribe topic: " << topic << " subscription - " << subscriptionName_); callback(ResultAlreadyClosed); @@ -294,22 +315,21 @@ void MultiTopicsConsumerImpl::unsubscribeOneTopicAsync(const std::string& topic, LOG_ERROR("TopicName invalid: " << topic); callback(ResultUnknownError); } - int numberPartitions = it->second; std::shared_ptr> consumerUnsubed = std::make_shared>(0); for (int i = 0; i < numberPartitions; i++) { std::string topicPartitionName = topicName->getTopicPartitionName(i); - std::map::iterator iterator = consumers_.find(topicPartitionName); - - if (consumers_.end() == iterator) { + auto optConsumer = consumers_.find(topicPartitionName); + if (optConsumer.is_empty()) { LOG_ERROR("TopicsConsumer not subscribed on topicPartitionName: " << topicPartitionName); callback(ResultUnknownError); + continue; } - (iterator->second) - ->unsubscribeAsync(std::bind(&MultiTopicsConsumerImpl::handleOneTopicUnsubscribedAsync, - shared_from_this(), std::placeholders::_1, consumerUnsubed, - numberPartitions, topicName, topicPartitionName, callback)); + optConsumer.value()->unsubscribeAsync( + 
std::bind(&MultiTopicsConsumerImpl::handleOneTopicUnsubscribedAsync, shared_from_this(), + std::placeholders::_1, consumerUnsubed, numberPartitions, topicName, topicPartitionName, + callback)); } } @@ -319,17 +339,16 @@ void MultiTopicsConsumerImpl::handleOneTopicUnsubscribedAsync( (*consumerUnsubed)++; if (result != ResultOk) { - setState(Failed); + state_ = Failed; LOG_ERROR("Error Closing one of the consumers in TopicsConsumer, result: " << result << " topicPartitionName - " << topicPartitionName); } LOG_DEBUG("Successfully Unsubscribed one Consumer. topicPartitionName - " << topicPartitionName); - std::map::iterator iterator = consumers_.find(topicPartitionName); - if (consumers_.end() != iterator) { - iterator->second->pauseMessageListener(); - consumers_.erase(iterator); + auto optConsumer = consumers_.remove(topicPartitionName); + if (optConsumer.is_present()) { + optConsumer.value()->pauseMessageListener(); } if (consumerUnsubed->load() == numberPartitions) { @@ -352,7 +371,8 @@ void MultiTopicsConsumerImpl::handleOneTopicUnsubscribedAsync( } void MultiTopicsConsumerImpl::closeAsync(ResultCallback callback) { - if (state_ == Closing || state_ == Closed) { + const auto state = state_.load(); + if (state == Closing || state == Closed) { LOG_ERROR("TopicsConsumer already closed " << " topic" << topic_ << " consumer - " << consumerStr_); if (callback) { @@ -361,71 +381,65 @@ void MultiTopicsConsumerImpl::closeAsync(ResultCallback callback) { return; } - setState(Closing); + state_ = Closing; - if (consumers_.empty()) { + std::weak_ptr weakSelf{shared_from_this()}; + int numConsumers = 0; + consumers_.clear( + [this, weakSelf, &numConsumers, callback](const std::string& name, const ConsumerImplPtr& consumer) { + auto self = weakSelf.lock(); + if (!self) { + return; + } + numConsumers++; + consumer->closeAsync([this, weakSelf, name, callback](Result result) { + auto self = weakSelf.lock(); + if (!self) { + return; + } + LOG_DEBUG("Closing the consumer for 
partition - " << name << " numberTopicPartitions_ - " + << numberTopicPartitions_->load()); + const int numConsumersLeft = --*numberTopicPartitions_; + if (numConsumersLeft < 0) { + LOG_ERROR("[" << name << "] Unexpected number of left consumers: " << numConsumersLeft + << " during close"); + return; + } + if (result != ResultOk) { + state_ = Failed; + LOG_ERROR("Closing the consumer failed for partition - " << name << " with error - " + << result); + } + // closed all consumers + if (numConsumersLeft == 0) { + messages_.clear(); + topicsPartitions_.clear(); + unAckedMessageTrackerPtr_->clear(); + + if (state_ != Failed) { + state_ = Closed; + } + + if (callback) { + callback(result); + } + } + }); + }); + if (numConsumers == 0) { LOG_DEBUG("TopicsConsumer have no consumers to close " << " topic" << topic_ << " subscription - " << subscriptionName_); - setState(Closed); + state_ = Closed; if (callback) { callback(ResultAlreadyClosed); } return; } - // close successfully subscribed consumers - for (ConsumerMap::const_iterator consumer = consumers_.begin(); consumer != consumers_.end(); - consumer++) { - std::string topicPartitionName = consumer->first; - ConsumerImplPtr consumerPtr = consumer->second; - - consumerPtr->closeAsync(std::bind(&MultiTopicsConsumerImpl::handleSingleConsumerClose, - shared_from_this(), std::placeholders::_1, topicPartitionName, - callback)); - } - // fail pending recieve failPendingReceiveCallback(); } -void MultiTopicsConsumerImpl::handleSingleConsumerClose(Result result, std::string& topicPartitionName, - CloseCallback callback) { - std::map::iterator iterator = consumers_.find(topicPartitionName); - if (consumers_.end() != iterator) { - consumers_.erase(iterator); - } - - LOG_DEBUG("Closing the consumer for partition - " << topicPartitionName << " numberTopicPartitions_ - " - << numberTopicPartitions_->load()); - - assert(numberTopicPartitions_->load() > 0); - numberTopicPartitions_->fetch_sub(1); - - if (result != ResultOk) { - 
setState(Failed); - LOG_ERROR("Closing the consumer failed for partition - " << topicPartitionName << " with error - " - << result); - } - - // closed all consumers - if (numberTopicPartitions_->load() == 0) { - messages_.clear(); - consumers_.clear(); - topicsPartitions_.clear(); - unAckedMessageTrackerPtr_->clear(); - - if (state_ != Failed) { - state_ = Closed; - } - - multiTopicsConsumerCreatedPromise_.setFailed(ResultUnknownError); - if (callback) { - callback(result); - } - return; - } -} - void MultiTopicsConsumerImpl::messageReceived(Consumer consumer, const Message& msg) { LOG_DEBUG("Received Message from one of the topic - " << consumer.getTopic() << " message:" << msg.getDataAsString()); @@ -464,18 +478,14 @@ void MultiTopicsConsumerImpl::internalListener(Consumer consumer) { } Result MultiTopicsConsumerImpl::receive(Message& msg) { - Lock lock(mutex_); if (state_ != Ready) { - lock.unlock(); return ResultAlreadyClosed; } if (messageListener_) { - lock.unlock(); LOG_ERROR("Can not receive when a listener has been set"); return ResultInvalidConfiguration; } - lock.unlock(); messages_.pop(msg); unAckedMessageTrackerPtr_->add(msg.getMessageId()); @@ -483,19 +493,15 @@ Result MultiTopicsConsumerImpl::receive(Message& msg) { } Result MultiTopicsConsumerImpl::receive(Message& msg, int timeout) { - Lock lock(mutex_); if (state_ != Ready) { - lock.unlock(); return ResultAlreadyClosed; } if (messageListener_) { - lock.unlock(); LOG_ERROR("Can not receive when a listener has been set"); return ResultInvalidConfiguration; } - lock.unlock(); if (messages_.pop(msg, std::chrono::milliseconds(timeout))) { unAckedMessageTrackerPtr_->add(msg.getMessageId()); return ResultOk; @@ -508,12 +514,10 @@ void MultiTopicsConsumerImpl::receiveAsync(ReceiveCallback& callback) { Message msg; // fail the callback if consumer is closing or closed - Lock stateLock(mutex_); if (state_ != Ready) { callback(ResultAlreadyClosed, msg); return; } - stateLock.unlock(); Lock 
lock(pendingReceiveMutex_); if (messages_.pop(msg, std::chrono::milliseconds(0))) { @@ -543,15 +547,14 @@ void MultiTopicsConsumerImpl::acknowledgeAsync(const MessageId& msgId, ResultCal } const std::string& topicPartitionName = msgId.getTopicName(); - std::map::iterator iterator = consumers_.find(topicPartitionName); + auto optConsumer = consumers_.find(topicPartitionName); - if (consumers_.end() != iterator) { + if (optConsumer.is_present()) { unAckedMessageTrackerPtr_->remove(msgId); - iterator->second->acknowledgeAsync(msgId, callback); + optConsumer.value()->acknowledgeAsync(msgId, callback); } else { LOG_ERROR("Message of topic: " << topicPartitionName << " not in unAckedMessageTracker"); callback(ResultUnknownError); - return; } } @@ -560,11 +563,11 @@ void MultiTopicsConsumerImpl::acknowledgeCumulativeAsync(const MessageId& msgId, } void MultiTopicsConsumerImpl::negativeAcknowledge(const MessageId& msgId) { - auto iterator = consumers_.find(msgId.getTopicName()); + auto optConsumer = consumers_.find(msgId.getTopicName()); - if (consumers_.end() != iterator) { + if (optConsumer.is_present()) { unAckedMessageTrackerPtr_->remove(msgId); - iterator->second->negativeAcknowledge(msgId); + optConsumer.value()->negativeAcknowledge(msgId); } } @@ -579,48 +582,25 @@ const std::string& MultiTopicsConsumerImpl::getTopic() const { return topic_; } const std::string& MultiTopicsConsumerImpl::getName() const { return consumerStr_; } -void MultiTopicsConsumerImpl::setState(const MultiTopicsConsumerState state) { - Lock lock(mutex_); - state_ = state; -} - -bool MultiTopicsConsumerImpl::compareAndSetState(MultiTopicsConsumerState expect, - MultiTopicsConsumerState update) { - Lock lock(mutex_); - if (state_ == expect) { - state_ = update; - return true; - } else { - return false; - } -} - void MultiTopicsConsumerImpl::shutdown() {} bool MultiTopicsConsumerImpl::isClosed() { return state_ == Closed; } -bool MultiTopicsConsumerImpl::isOpen() { - Lock lock(mutex_); - return 
state_ == Ready; -} +bool MultiTopicsConsumerImpl::isOpen() { return state_ == Ready; } void MultiTopicsConsumerImpl::receiveMessages() { - for (ConsumerMap::const_iterator consumer = consumers_.begin(); consumer != consumers_.end(); - consumer++) { - ConsumerImplPtr consumerPtr = consumer->second; - consumerPtr->sendFlowPermitsToBroker(consumerPtr->getCnx().lock(), conf_.getReceiverQueueSize()); - LOG_DEBUG("Sending FLOW command for consumer - " << consumerPtr->getConsumerId()); - } + const auto receiverQueueSize = conf_.getReceiverQueueSize(); + consumers_.forEachValue([receiverQueueSize](const ConsumerImplPtr& consumer) { + consumer->sendFlowPermitsToBroker(consumer->getCnx().lock(), receiverQueueSize); + LOG_DEBUG("Sending FLOW command for consumer - " << consumer->getConsumerId()); + }); } Result MultiTopicsConsumerImpl::pauseMessageListener() { if (!messageListener_) { return ResultInvalidConfiguration; } - for (ConsumerMap::const_iterator consumer = consumers_.begin(); consumer != consumers_.end(); - consumer++) { - (consumer->second)->pauseMessageListener(); - } + consumers_.forEachValue([](const ConsumerImplPtr& consumer) { consumer->pauseMessageListener(); }); return ResultOk; } @@ -628,19 +608,14 @@ Result MultiTopicsConsumerImpl::resumeMessageListener() { if (!messageListener_) { return ResultInvalidConfiguration; } - for (ConsumerMap::const_iterator consumer = consumers_.begin(); consumer != consumers_.end(); - consumer++) { - (consumer->second)->resumeMessageListener(); - } + consumers_.forEachValue([](const ConsumerImplPtr& consumer) { consumer->resumeMessageListener(); }); return ResultOk; } void MultiTopicsConsumerImpl::redeliverUnacknowledgedMessages() { LOG_DEBUG("Sending RedeliverUnacknowledgedMessages command for partitioned consumer."); - for (ConsumerMap::const_iterator consumer = consumers_.begin(); consumer != consumers_.end(); - consumer++) { - (consumer->second)->redeliverUnacknowledgedMessages(); - } + consumers_.forEachValue( + [](const 
ConsumerImplPtr& consumer) { consumer->redeliverUnacknowledgedMessages(); }); unAckedMessageTrackerPtr_->clear(); } @@ -653,33 +628,33 @@ void MultiTopicsConsumerImpl::redeliverUnacknowledgedMessages(const std::setsecond)->redeliverUnacknowledgedMessages(messageIds); - } + consumers_.forEachValue([&messageIds](const ConsumerImplPtr& consumer) { + consumer->redeliverUnacknowledgedMessages(messageIds); + }); } int MultiTopicsConsumerImpl::getNumOfPrefetchedMessages() const { return messages_.size(); } void MultiTopicsConsumerImpl::getBrokerConsumerStatsAsync(BrokerConsumerStatsCallback callback) { - Lock lock(mutex_); if (state_ != Ready) { - lock.unlock(); callback(ResultConsumerNotInitialized, BrokerConsumerStats()); return; } + Lock lock(mutex_); MultiTopicsBrokerConsumerStatsPtr statsPtr = std::make_shared(numberTopicPartitions_->load()); LatchPtr latchPtr = std::make_shared(numberTopicPartitions_->load()); - int size = consumers_.size(); lock.unlock(); - ConsumerMap::const_iterator consumer = consumers_.begin(); - for (int i = 0; i < size; i++, consumer++) { - consumer->second->getBrokerConsumerStatsAsync( - std::bind(&MultiTopicsConsumerImpl::handleGetConsumerStats, shared_from_this(), - std::placeholders::_1, std::placeholders::_2, latchPtr, statsPtr, i, callback)); - } + auto self = shared_from_this(); + size_t i = 0; + consumers_.forEachValue([&self, &latchPtr, &statsPtr, &i, callback](const ConsumerImplPtr& consumer) { + size_t index = i++; + consumer->getBrokerConsumerStatsAsync( + [self, latchPtr, statsPtr, index, callback](Result result, BrokerConsumerStats stats) { + self->handleGetConsumerStats(result, stats, latchPtr, statsPtr, index, callback); + }); + }); } void MultiTopicsConsumerImpl::handleGetConsumerStats(Result res, BrokerConsumerStats brokerConsumerStats, @@ -721,39 +696,121 @@ void MultiTopicsConsumerImpl::seekAsync(const MessageId& msgId, ResultCallback c } void MultiTopicsConsumerImpl::seekAsync(uint64_t timestamp, ResultCallback callback) 
{ - callback(ResultOperationNotSupported); + if (state_ != Ready) { + callback(ResultAlreadyClosed); + return; + } + + MultiResultCallback multiResultCallback(callback, consumers_.size()); + consumers_.forEachValue([×tamp, &multiResultCallback](ConsumerImplPtr consumer) { + consumer->seekAsync(timestamp, multiResultCallback); + }); } void MultiTopicsConsumerImpl::setNegativeAcknowledgeEnabledForTesting(bool enabled) { - Lock lock(mutex_); - for (auto&& c : consumers_) { - c.second->setNegativeAcknowledgeEnabledForTesting(enabled); - } + consumers_.forEachValue([enabled](const ConsumerImplPtr& consumer) { + consumer->setNegativeAcknowledgeEnabledForTesting(enabled); + }); } bool MultiTopicsConsumerImpl::isConnected() const { - Lock lock(mutex_); if (state_ != Ready) { return false; } - for (const auto& topicAndConsumer : consumers_) { - if (!topicAndConsumer.second->isConnected()) { - return false; - } - } - return true; + return consumers_ + .findFirstValueIf([](const ConsumerImplPtr& consumer) { return !consumer->isConnected(); }) + .is_empty(); } uint64_t MultiTopicsConsumerImpl::getNumberOfConnectedConsumer() { - Lock lock(mutex_); uint64_t numberOfConnectedConsumer = 0; - const auto consumers = consumers_; - lock.unlock(); - for (const auto& topicAndConsumer : consumers) { - if (topicAndConsumer.second->isConnected()) { + consumers_.forEachValue([&numberOfConnectedConsumer](const ConsumerImplPtr& consumer) { + if (consumer->isConnected()) { numberOfConnectedConsumer++; } - } + }); return numberOfConnectedConsumer; } +void MultiTopicsConsumerImpl::runPartitionUpdateTask() { + partitionsUpdateTimer_->expires_from_now(partitionsUpdateInterval_); + auto self = shared_from_this(); + partitionsUpdateTimer_->async_wait([self](const boost::system::error_code& ec) { + // If two requests call runPartitionUpdateTask at the same time, the timer will fail, and it + // cannot continue at this time, and the request needs to be ignored. 
+ if (!ec) { + self->topicPartitionUpdate(); + } + }); +} +void MultiTopicsConsumerImpl::topicPartitionUpdate() { + using namespace std::placeholders; + Lock lock(mutex_); + auto topicsPartitions = topicsPartitions_; + lock.unlock(); + for (const auto& item : topicsPartitions) { + auto topicName = TopicName::get(item.first); + auto currentNumPartitions = item.second; + lookupServicePtr_->getPartitionMetadataAsync(topicName).addListener( + std::bind(&MultiTopicsConsumerImpl::handleGetPartitions, shared_from_this(), topicName, + std::placeholders::_1, std::placeholders::_2, currentNumPartitions)); + } +} +void MultiTopicsConsumerImpl::handleGetPartitions(TopicNamePtr topicName, Result result, + const LookupDataResultPtr& lookupDataResult, + int currentNumPartitions) { + if (state_ != Ready) { + return; + } + if (!result) { + const auto newNumPartitions = static_cast(lookupDataResult->getPartitions()); + if (newNumPartitions > currentNumPartitions) { + LOG_INFO("new partition count: " << newNumPartitions + << " current partition count: " << currentNumPartitions); + auto partitionsNeedCreate = + std::make_shared>(newNumPartitions - currentNumPartitions); + ConsumerSubResultPromisePtr topicPromise = std::make_shared>(); + Lock lock(mutex_); + topicsPartitions_[topicName->toString()] = newNumPartitions; + lock.unlock(); + numberTopicPartitions_->fetch_add(newNumPartitions - currentNumPartitions); + for (unsigned int i = currentNumPartitions; i < newNumPartitions; i++) { + subscribeSingleNewConsumer(newNumPartitions, topicName, i, topicPromise, + partitionsNeedCreate); + } + // `runPartitionUpdateTask()` will be called in `handleSingleConsumerCreated()` + return; + } + } else { + LOG_WARN("Failed to getPartitionMetadata: " << strResult(result)); + } + runPartitionUpdateTask(); +} + +void MultiTopicsConsumerImpl::subscribeSingleNewConsumer( + int numPartitions, TopicNamePtr topicName, int partitionIndex, + ConsumerSubResultPromisePtr topicSubResultPromise, + 
std::shared_ptr> partitionsNeedCreate) { + ConsumerConfiguration config = conf_.clone(); + ExecutorServicePtr internalListenerExecutor = client_->getPartitionListenerExecutorProvider()->get(); + config.setMessageListener(std::bind(&MultiTopicsConsumerImpl::messageReceived, shared_from_this(), + std::placeholders::_1, std::placeholders::_2)); + + // Apply total limit of receiver queue size across partitions + config.setReceiverQueueSize( + std::min(conf_.getReceiverQueueSize(), + (int)(conf_.getMaxTotalReceiverQueueSizeAcrossPartitions() / numPartitions))); + + std::string topicPartitionName = topicName->getTopicPartitionName(partitionIndex); + + auto consumer = std::make_shared(client_, topicPartitionName, subscriptionName_, config, + internalListenerExecutor, true, Partitioned); + consumer->getConsumerCreatedFuture().addListener( + std::bind(&MultiTopicsConsumerImpl::handleSingleConsumerCreated, shared_from_this(), + std::placeholders::_1, std::placeholders::_2, partitionsNeedCreate, topicSubResultPromise)); + consumer->setPartitionIndex(partitionIndex); + consumer->start(); + consumers_.emplace(topicPartitionName, consumer); + LOG_INFO("Add Creating Consumer for - " << topicPartitionName << " - " << consumerStr_ + << " consumerSize: " << consumers_.size()); +} diff --git a/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.h b/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.h index aa6b261a267d8..95c24f68c5b78 100644 --- a/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.h +++ b/pulsar-client-cpp/lib/MultiTopicsConsumerImpl.h @@ -32,6 +32,7 @@ #include #include #include +#include namespace pulsar { typedef std::shared_ptr> ConsumerSubResultPromisePtr; @@ -50,7 +51,14 @@ class MultiTopicsConsumerImpl : public ConsumerImplBase, }; MultiTopicsConsumerImpl(ClientImplPtr client, const std::vector& topics, const std::string& subscriptionName, TopicNamePtr topicName, - const ConsumerConfiguration& conf, const LookupServicePtr lookupServicePtr_); + const ConsumerConfiguration& 
conf, LookupServicePtr lookupServicePtr_); + MultiTopicsConsumerImpl(ClientImplPtr client, TopicNamePtr topicName, int numPartitions, + const std::string& subscriptionName, const ConsumerConfiguration& conf, + LookupServicePtr lookupServicePtr) + : MultiTopicsConsumerImpl(client, {topicName->toString()}, subscriptionName, topicName, conf, + lookupServicePtr) { + topicsPartitions_[topicName->toString()] = numPartitions; + } ~MultiTopicsConsumerImpl(); // overrided methods from ConsumerImplBase Future getConsumerCreatedFuture() override; @@ -93,29 +101,28 @@ class MultiTopicsConsumerImpl : public ConsumerImplBase, std::string consumerStr_; std::string topic_; const ConsumerConfiguration conf_; - typedef std::map ConsumerMap; + typedef SynchronizedHashMap ConsumerMap; ConsumerMap consumers_; std::map topicsPartitions_; mutable std::mutex mutex_; std::mutex pendingReceiveMutex_; - MultiTopicsConsumerState state_ = Pending; + std::atomic state_{Pending}; BlockingQueue messages_; - ExecutorServicePtr listenerExecutor_; + const ExecutorServicePtr listenerExecutor_; MessageListener messageListener_; + DeadlineTimerPtr partitionsUpdateTimer_; + boost::posix_time::time_duration partitionsUpdateInterval_; LookupServicePtr lookupServicePtr_; std::shared_ptr> numberTopicPartitions_; + std::atomic failedResult{ResultOk}; Promise multiTopicsConsumerCreatedPromise_; UnAckedMessageTrackerPtr unAckedMessageTrackerPtr_; - const std::vector& topics_; + const std::vector topics_; std::queue pendingReceives_; /* methods */ - void setState(MultiTopicsConsumerState state); - bool compareAndSetState(MultiTopicsConsumerState expect, MultiTopicsConsumerState update); - void handleSinglePartitionConsumerCreated(Result result, ConsumerImplBaseWeakPtr consumerImplBaseWeakPtr, unsigned int partitionIndex); - void handleSingleConsumerClose(Result result, std::string& topicPartitionName, CloseCallback callback); void notifyResult(CloseCallback closeCallback); void messageReceived(Consumer 
consumer, const Message& msg); void internalListener(Consumer consumer); @@ -124,9 +131,7 @@ class MultiTopicsConsumerImpl : public ConsumerImplBase, void handleOneTopicSubscribed(Result result, Consumer consumer, const std::string& topic, std::shared_ptr> topicsNeedCreate); - void subscribeTopicPartitions(const Result result, const LookupDataResultPtr partitionMetadata, - TopicNamePtr topicName, const std::string& consumerName, - ConsumerConfiguration conf, + void subscribeTopicPartitions(int numPartitions, TopicNamePtr topicName, const std::string& consumerName, ConsumerSubResultPromisePtr topicSubResultPromise); void handleSingleConsumerCreated(Result result, ConsumerImplBaseWeakPtr consumerImplBaseWeakPtr, std::shared_ptr> partitionsNeedCreate, @@ -136,11 +141,19 @@ class MultiTopicsConsumerImpl : public ConsumerImplBase, void handleOneTopicUnsubscribedAsync(Result result, std::shared_ptr> consumerUnsubed, int numberPartitions, TopicNamePtr topicNamePtr, std::string& topicPartitionName, ResultCallback callback); + void runPartitionUpdateTask(); + void topicPartitionUpdate(); + void handleGetPartitions(TopicNamePtr topicName, Result result, + const LookupDataResultPtr& lookupDataResult, int currentNumPartitions); + void subscribeSingleNewConsumer(int numPartitions, TopicNamePtr topicName, int partitionIndex, + ConsumerSubResultPromisePtr topicSubResultPromise, + std::shared_ptr> partitionsNeedCreate); private: void setNegativeAcknowledgeEnabledForTesting(bool enabled) override; FRIEND_TEST(ConsumerTest, testMultiTopicsConsumerUnAckedMessageRedelivery); + FRIEND_TEST(ConsumerTest, testPartitionedConsumerUnAckedMessageRedelivery); }; typedef std::shared_ptr MultiTopicsConsumerImplPtr; diff --git a/pulsar-client-cpp/lib/PartitionedBrokerConsumerStatsImpl.cc b/pulsar-client-cpp/lib/PartitionedBrokerConsumerStatsImpl.cc deleted file mode 100644 index 9d5965b24bd92..0000000000000 --- a/pulsar-client-cpp/lib/PartitionedBrokerConsumerStatsImpl.cc +++ /dev/null @@ -1,163 
+0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include -#include -#include -#include - -namespace pulsar { - -const std::string PartitionedBrokerConsumerStatsImpl::DELIMITER = ";"; - -PartitionedBrokerConsumerStatsImpl::PartitionedBrokerConsumerStatsImpl(size_t size) { - statsList_.resize(size); -} - -bool PartitionedBrokerConsumerStatsImpl::isValid() const { - bool isValid = true; - for (int i = 0; i < statsList_.size(); i++) { - isValid &= statsList_[i].isValid(); - } - return isValid; -} - -std::ostream& operator<<(std::ostream& os, const PartitionedBrokerConsumerStatsImpl& obj) { - os << "\nPartitionedBrokerConsumerStatsImpl [" - << "validTill_ = " << obj.isValid() << ", msgRateOut_ = " << obj.getMsgRateOut() - << ", msgThroughputOut_ = " << obj.getMsgThroughputOut() - << ", msgRateRedeliver_ = " << obj.getMsgRateRedeliver() - << ", consumerName_ = " << obj.getConsumerName() - << ", availablePermits_ = " << obj.getAvailablePermits() - << ", unackedMessages_ = " << obj.getUnackedMessages() - << ", blockedConsumerOnUnackedMsgs_ = " << obj.isBlockedConsumerOnUnackedMsgs() - << ", address_ = " << obj.getAddress() << ", connectedSince_ = " << obj.getConnectedSince() - << ", type_ = " << 
obj.getType() << ", msgRateExpired_ = " << obj.getMsgRateExpired() - << ", msgBacklog_ = " << obj.getMsgBacklog() << "]"; - return os; -} - -double PartitionedBrokerConsumerStatsImpl::getMsgRateOut() const { - double sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getMsgRateOut(); - } - return sum; -} - -double PartitionedBrokerConsumerStatsImpl::getMsgThroughputOut() const { - double sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getMsgThroughputOut(); - } - return sum; -} - -double PartitionedBrokerConsumerStatsImpl::getMsgRateRedeliver() const { - double sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getMsgRateRedeliver(); - } - return sum; -} - -const std::string PartitionedBrokerConsumerStatsImpl::getConsumerName() const { - std::string str; - for (int i = 0; i < statsList_.size(); i++) { - str += statsList_[i].getConsumerName() + DELIMITER; - } - return str; -} - -uint64_t PartitionedBrokerConsumerStatsImpl::getAvailablePermits() const { - uint64_t sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getAvailablePermits(); - } - return sum; -} - -uint64_t PartitionedBrokerConsumerStatsImpl::getUnackedMessages() const { - uint64_t sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getUnackedMessages(); - } - return sum; -} - -bool PartitionedBrokerConsumerStatsImpl::isBlockedConsumerOnUnackedMsgs() const { - if (statsList_.size() == 0) { - return false; - } - - bool isValid = true; - for (int i = 0; i < statsList_.size(); i++) { - isValid &= statsList_[i].isValid(); - } - return isValid; -} - -const std::string PartitionedBrokerConsumerStatsImpl::getAddress() const { - std::string str; - for (int i = 0; i < statsList_.size(); i++) { - str += statsList_[i].getAddress() + DELIMITER; - } - return str; -} - -const std::string PartitionedBrokerConsumerStatsImpl::getConnectedSince() const { - std::string str; - for 
(int i = 0; i < statsList_.size(); i++) { - str += statsList_[i].getConnectedSince() + DELIMITER; - } - return str; -} - -const ConsumerType PartitionedBrokerConsumerStatsImpl::getType() const { - if (!statsList_.size()) { - return ConsumerExclusive; - } - return statsList_[0].getType(); -} - -double PartitionedBrokerConsumerStatsImpl::getMsgRateExpired() const { - double sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getMsgRateExpired(); - } - return sum; -} - -uint64_t PartitionedBrokerConsumerStatsImpl::getMsgBacklog() const { - uint64_t sum = 0; - for (int i = 0; i < statsList_.size(); i++) { - sum += statsList_[i].getMsgBacklog(); - } - return sum; -} - -BrokerConsumerStats PartitionedBrokerConsumerStatsImpl::getBrokerConsumerStats(int index) { - return statsList_[index]; -} - -void PartitionedBrokerConsumerStatsImpl::add(BrokerConsumerStats stats, int index) { - statsList_[index] = stats; -} - -void PartitionedBrokerConsumerStatsImpl::clear() { statsList_.clear(); } -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/PartitionedBrokerConsumerStatsImpl.h b/pulsar-client-cpp/lib/PartitionedBrokerConsumerStatsImpl.h deleted file mode 100644 index 683f5245dbb2c..0000000000000 --- a/pulsar-client-cpp/lib/PartitionedBrokerConsumerStatsImpl.h +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_CPP_PARTITIONEDBROKERCONSUMERSTATSIMPL_H -#define PULSAR_CPP_PARTITIONEDBROKERCONSUMERSTATSIMPL_H - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace pulsar { -class PULSAR_PUBLIC PartitionedBrokerConsumerStatsImpl : public BrokerConsumerStatsImplBase { - private: - std::vector statsList_; - static const std::string DELIMITER; - - public: - PartitionedBrokerConsumerStatsImpl(size_t size); - - /** Returns true if the Stats are still valid **/ - virtual bool isValid() const; - - /** Returns the rate of messages delivered to the consumer. msg/s */ - virtual double getMsgRateOut() const; - - /** Returns the throughput delivered to the consumer. bytes/s */ - virtual double getMsgThroughputOut() const; - - /** Returns the rate of messages redelivered by this consumer. msg/s */ - virtual double getMsgRateRedeliver() const; - - /** Returns the Name of the consumer */ - virtual const std::string getConsumerName() const; - - /** Returns the Number of available message permits for the consumer */ - virtual uint64_t getAvailablePermits() const; - - /** Returns the Number of unacknowledged messages for the consumer */ - virtual uint64_t getUnackedMessages() const; - - /** Returns true if the consumer is blocked due to unacked messages. 
*/ - virtual bool isBlockedConsumerOnUnackedMsgs() const; - - /** Returns the Address of this consumer */ - virtual const std::string getAddress() const; - - /** Returns the Timestamp of connection */ - virtual const std::string getConnectedSince() const; - - /** Returns Whether this subscription is Exclusive or Shared or Failover */ - virtual const ConsumerType getType() const; - - /** Returns the rate of messages expired on this subscription. msg/s */ - virtual double getMsgRateExpired() const; - - /** Returns the Number of messages in the subscription backlog */ - virtual uint64_t getMsgBacklog() const; - - /** Returns the BrokerConsumerStatsImpl at of ith partition */ - BrokerConsumerStats getBrokerConsumerStats(int index); - - void add(BrokerConsumerStats stats, int index); - - void clear(); - - friend std::ostream &operator<<(std::ostream &os, const PartitionedBrokerConsumerStatsImpl &obj); -}; -typedef std::shared_ptr PartitionedBrokerConsumerStatsPtr; -} // namespace pulsar -#endif // PULSAR_CPP_BROKERCONSUMERSTATSIMPL_H diff --git a/pulsar-client-cpp/lib/PartitionedConsumerImpl.cc b/pulsar-client-cpp/lib/PartitionedConsumerImpl.cc deleted file mode 100644 index e43b5090e43f7..0000000000000 --- a/pulsar-client-cpp/lib/PartitionedConsumerImpl.cc +++ /dev/null @@ -1,647 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "PartitionedConsumerImpl.h" -#include "MultiResultCallback.h" - -DECLARE_LOG_OBJECT() - -namespace pulsar { - -PartitionedConsumerImpl::PartitionedConsumerImpl(ClientImplPtr client, const std::string& subscriptionName, - const TopicNamePtr topicName, - const unsigned int numPartitions, - const ConsumerConfiguration& conf) - : client_(client), - subscriptionName_(subscriptionName), - topicName_(topicName), - numPartitions_(numPartitions), - conf_(conf), - messages_(1000), - listenerExecutor_(client->getListenerExecutorProvider()->get()), - messageListener_(conf.getMessageListener()), - topic_(topicName->toString()) { - std::stringstream consumerStrStream; - consumerStrStream << "[Partitioned Consumer: " << topic_ << "," << subscriptionName << "," - << numPartitions << "]"; - if (conf.getUnAckedMessagesTimeoutMs() != 0) { - if (conf.getTickDurationInMs() > 0) { - unAckedMessageTrackerPtr_.reset(new UnAckedMessageTrackerEnabled( - conf.getUnAckedMessagesTimeoutMs(), conf.getTickDurationInMs(), client, *this)); - } else { - unAckedMessageTrackerPtr_.reset( - new UnAckedMessageTrackerEnabled(conf.getUnAckedMessagesTimeoutMs(), client, *this)); - } - } else { - unAckedMessageTrackerPtr_.reset(new UnAckedMessageTrackerDisabled()); - } - auto partitionsUpdateInterval = static_cast(client_->conf().getPartitionsUpdateInterval()); - if (partitionsUpdateInterval > 0) { - partitionsUpdateTimer_ = listenerExecutor_->createDeadlineTimer(); - partitionsUpdateInterval_ = boost::posix_time::seconds(partitionsUpdateInterval); - lookupServicePtr_ = 
client_->getLookup(); - } -} - -PartitionedConsumerImpl::~PartitionedConsumerImpl() {} - -Future PartitionedConsumerImpl::getConsumerCreatedFuture() { - return partitionedConsumerCreatedPromise_.getFuture(); -} -const std::string& PartitionedConsumerImpl::getSubscriptionName() const { return subscriptionName_; } - -const std::string& PartitionedConsumerImpl::getTopic() const { return topic_; } - -Result PartitionedConsumerImpl::receive(Message& msg) { - Lock lock(mutex_); - if (state_ != Ready) { - lock.unlock(); - return ResultAlreadyClosed; - } - // See comments in `receive(Message&, int)` - lock.unlock(); - - if (messageListener_) { - LOG_ERROR("Can not receive when a listener has been set"); - return ResultInvalidConfiguration; - } - - messages_.pop(msg); - unAckedMessageTrackerPtr_->add(msg.getMessageId()); - return ResultOk; -} - -Result PartitionedConsumerImpl::receive(Message& msg, int timeout) { - Lock lock(mutex_); - if (state_ != Ready) { - lock.unlock(); - return ResultAlreadyClosed; - } - // We unlocked `mutex_` here to avoid starvation of methods which are trying to acquire `mutex_`. - // In addition, `messageListener_` won't change once constructed, `BlockingQueue::pop` and - // `UnAckedMessageTracker::add` are thread-safe, so they don't need `mutex_` to achieve thread-safety. 
- lock.unlock(); - - if (messageListener_) { - LOG_ERROR("Can not receive when a listener has been set"); - return ResultInvalidConfiguration; - } - - if (messages_.pop(msg, std::chrono::milliseconds(timeout))) { - unAckedMessageTrackerPtr_->add(msg.getMessageId()); - return ResultOk; - } else { - return ResultTimeout; - } -} - -void PartitionedConsumerImpl::receiveAsync(ReceiveCallback& callback) { - Message msg; - - // fail the callback if consumer is closing or closed - Lock stateLock(mutex_); - if (state_ != Ready) { - callback(ResultAlreadyClosed, msg); - return; - } - stateLock.unlock(); - - Lock lock(pendingReceiveMutex_); - if (messages_.pop(msg, std::chrono::milliseconds(0))) { - lock.unlock(); - unAckedMessageTrackerPtr_->add(msg.getMessageId()); - callback(ResultOk, msg); - } else { - pendingReceives_.push(callback); - } -} - -void PartitionedConsumerImpl::unsubscribeAsync(ResultCallback callback) { - LOG_INFO("[" << topicName_->toString() << "," << subscriptionName_ << "] Unsubscribing"); - // change state to Closing, so that no Ready state operation is permitted during unsubscribe - setState(Closing); - // do not accept un subscribe until we have subscribe to all of the partitions of a topic - // it's a logical single topic so it should behave like a single topic, even if it's sharded - Lock lock(mutex_); - if (state_ != Ready) { - lock.unlock(); - unsigned int index = 0; - for (ConsumerList::const_iterator consumer = consumers_.begin(); consumer != consumers_.end(); - consumer++) { - LOG_DEBUG("Unsubcribing Consumer - " << index << " for Subscription - " << subscriptionName_ - << " for Topic - " << topicName_->toString()); - (*consumer)->unsubscribeAsync(std::bind(&PartitionedConsumerImpl::handleUnsubscribeAsync, - shared_from_this(), std::placeholders::_1, index++, - callback)); - } - } -} - -void PartitionedConsumerImpl::handleUnsubscribeAsync(Result result, unsigned int consumerIndex, - ResultCallback callback) { - Lock lock(mutex_); - if (state_ 
== Failed) { - lock.unlock(); - // we have already informed the client that unsubcribe has failed so, ignore this callbacks - // or do we still go ahead and check how many could we close successfully? - LOG_DEBUG("handleUnsubscribeAsync callback received in Failed State for consumerIndex - " - << consumerIndex << "with Result - " << result << " for Subscription - " - << subscriptionName_ << " for Topic - " << topicName_->toString()); - return; - } - lock.unlock(); - if (result != ResultOk) { - setState(Failed); - LOG_ERROR("Error Closing one of the parition consumers, consumerIndex - " << consumerIndex); - callback(ResultUnknownError); - return; - } - const auto numPartitions = getNumPartitionsWithLock(); - assert(unsubscribedSoFar_ <= numPartitions); - assert(consumerIndex <= numPartitions); - // this means we have successfully closed this partition consumer and no unsubscribe has failed so far - LOG_INFO("Successfully Unsubscribed Consumer - " << consumerIndex << " for Subscription - " - << subscriptionName_ << " for Topic - " - << topicName_->toString()); - unsubscribedSoFar_++; - if (unsubscribedSoFar_ == numPartitions) { - LOG_DEBUG("Unsubscribed all of the partition consumer for subscription - " << subscriptionName_); - setState(Closed); - callback(ResultOk); - return; - } -} - -void PartitionedConsumerImpl::acknowledgeAsync(const MessageId& msgId, ResultCallback callback) { - int32_t partition = msgId.partition(); -#ifndef NDEBUG - Lock consumersLock(consumersMutex_); - assert(partition < getNumPartitions() && partition >= 0 && consumers_.size() > partition); - consumersLock.unlock(); -#endif - unAckedMessageTrackerPtr_->remove(msgId); - consumers_[partition]->acknowledgeAsync(msgId, callback); -} - -void PartitionedConsumerImpl::acknowledgeCumulativeAsync(const MessageId& msgId, ResultCallback callback) { - callback(ResultOperationNotSupported); -} - -void PartitionedConsumerImpl::negativeAcknowledge(const MessageId& msgId) { - int32_t partition = 
msgId.partition(); - unAckedMessageTrackerPtr_->remove(msgId); - consumers_[partition]->negativeAcknowledge(msgId); -} - -unsigned int PartitionedConsumerImpl::getNumPartitions() const { return numPartitions_; } - -unsigned int PartitionedConsumerImpl::getNumPartitionsWithLock() const { - Lock consumersLock(consumersMutex_); - return getNumPartitions(); -} - -ConsumerConfiguration PartitionedConsumerImpl::getSinglePartitionConsumerConfig() const { - using namespace std::placeholders; - - ConsumerConfiguration config = conf_.clone(); - // all the partitioned-consumer belonging to one partitioned topic should have same name - config.setConsumerName(conf_.getConsumerName()); - config.setConsumerType(conf_.getConsumerType()); - config.setBrokerConsumerStatsCacheTimeInMs(conf_.getBrokerConsumerStatsCacheTimeInMs()); - - const auto shared_this = const_cast(this)->shared_from_this(); - config.setMessageListener(std::bind(&PartitionedConsumerImpl::messageReceived, shared_this, - std::placeholders::_1, std::placeholders::_2)); - - // Apply total limit of receiver queue size across partitions - // NOTE: if it's called by handleGetPartitions(), the queue size of new internal consumers may be smaller - // than previous created internal consumers. 
- config.setReceiverQueueSize( - std::min(conf_.getReceiverQueueSize(), - (int)(conf_.getMaxTotalReceiverQueueSizeAcrossPartitions() / getNumPartitions()))); - - return config; -} - -ConsumerImplPtr PartitionedConsumerImpl::newInternalConsumer(unsigned int partition, - const ConsumerConfiguration& config) const { - using namespace std::placeholders; - - std::string topicPartitionName = topicName_->getTopicPartitionName(partition); - auto consumer = std::make_shared(client_, topicPartitionName, subscriptionName_, config, - internalListenerExecutor_, true, Partitioned); - - const auto shared_this = const_cast(this)->shared_from_this(); - consumer->getConsumerCreatedFuture().addListener( - std::bind(&PartitionedConsumerImpl::handleSinglePartitionConsumerCreated, shared_this, - std::placeholders::_1, std::placeholders::_2, partition)); - consumer->setPartitionIndex(partition); - - LOG_DEBUG("Creating Consumer for single Partition - " << topicPartitionName << "SubName - " - << subscriptionName_); - return consumer; -} - -void PartitionedConsumerImpl::start() { - internalListenerExecutor_ = client_->getPartitionListenerExecutorProvider()->get(); - const auto config = getSinglePartitionConsumerConfig(); - - // create consumer on each partition - // Here we don't need `consumersMutex` to protect `consumers_`, because `consumers_` can only be increased - // when `state_` is Ready - for (unsigned int i = 0; i < getNumPartitions(); i++) { - consumers_.push_back(newInternalConsumer(i, config)); - } - for (ConsumerList::const_iterator consumer = consumers_.begin(); consumer != consumers_.end(); - consumer++) { - (*consumer)->start(); - } -} - -void PartitionedConsumerImpl::handleSinglePartitionConsumerCreated( - Result result, ConsumerImplBaseWeakPtr consumerImplBaseWeakPtr, unsigned int partitionIndex) { - ResultCallback nullCallbackForCleanup = NULL; - Lock lock(mutex_); - if (state_ == Failed) { - // one of the consumer creation failed, and we are cleaning up - return; - } - 
const auto numPartitions = getNumPartitionsWithLock(); - assert(numConsumersCreated_ < numPartitions); - - if (result != ResultOk) { - state_ = Failed; - lock.unlock(); - partitionedConsumerCreatedPromise_.setFailed(result); - // unsubscribed all of the successfully subscribed partitioned consumers - closeAsync(nullCallbackForCleanup); - LOG_ERROR("Unable to create Consumer for partition - " << partitionIndex << " Error - " << result); - return; - } - - assert(partitionIndex < numPartitions && partitionIndex >= 0); - numConsumersCreated_++; - if (numConsumersCreated_ == numPartitions) { - LOG_INFO("Successfully Subscribed to Partitioned Topic - " << topicName_->toString() << " with - " - << numPartitions << " Partitions."); - state_ = Ready; - lock.unlock(); - if (partitionsUpdateTimer_) { - runPartitionUpdateTask(); - } - receiveMessages(); - partitionedConsumerCreatedPromise_.setValue(shared_from_this()); - return; - } -} - -void PartitionedConsumerImpl::handleSinglePartitionConsumerClose(Result result, unsigned int partitionIndex, - CloseCallback callback) { - Lock lock(mutex_); - if (state_ == Failed) { - // we should have already notified the client by callback - return; - } - if (result != ResultOk) { - state_ = Failed; - LOG_ERROR("Closing the consumer failed for partition - " << partitionIndex); - lock.unlock(); - partitionedConsumerCreatedPromise_.setFailed(result); - if (callback) { - callback(result); - } - return; - } - assert(partitionIndex < getNumPartitionsWithLock() && partitionIndex >= 0); - if (numConsumersCreated_ > 0) { - numConsumersCreated_--; - } - // closed all successfully - if (!numConsumersCreated_) { - state_ = Closed; - lock.unlock(); - // set the producerCreatedPromise to failure - partitionedConsumerCreatedPromise_.setFailed(ResultUnknownError); - if (callback) { - callback(result); - } - return; - } -} -void PartitionedConsumerImpl::closeAsync(ResultCallback callback) { - if (consumers_.empty()) { - notifyResult(callback); - return; 
- } - setState(Closed); - unsigned int consumerAlreadyClosed = 0; - // close successfully subscribed consumers - // Here we don't need `consumersMutex` to protect `consumers_`, because `consumers_` can only be increased - // when `state_` is Ready - for (auto& consumer : consumers_) { - if (!consumer->isClosed()) { - auto self = shared_from_this(); - const auto partition = consumer->getPartitionIndex(); - consumer->closeAsync([this, self, partition, callback](Result result) { - handleSinglePartitionConsumerClose(result, partition, callback); - }); - } else { - if (++consumerAlreadyClosed == consumers_.size()) { - // everything is closed already. so we are good. - notifyResult(callback); - return; - } - } - } - - // fail pending recieve - failPendingReceiveCallback(); -} - -void PartitionedConsumerImpl::notifyResult(CloseCallback closeCallback) { - if (closeCallback) { - // this means client invoked the closeAsync with a valid callback - setState(Closed); - closeCallback(ResultOk); - } else { - // consumer create failed, closeAsync called to cleanup the successfully created producers - setState(Failed); - partitionedConsumerCreatedPromise_.setFailed(ResultUnknownError); - } -} - -void PartitionedConsumerImpl::setState(const PartitionedConsumerState state) { - Lock lock(mutex_); - state_ = state; - lock.unlock(); -} - -void PartitionedConsumerImpl::shutdown() {} - -bool PartitionedConsumerImpl::isClosed() { return state_ == Closed; } - -bool PartitionedConsumerImpl::isOpen() { - Lock lock(mutex_); - return state_ == Ready; -} - -void PartitionedConsumerImpl::messageReceived(Consumer consumer, const Message& msg) { - LOG_DEBUG("Received Message from one of the partition - " << msg.impl_->messageId.partition()); - const std::string& topicPartitionName = consumer.getTopic(); - msg.impl_->setTopicName(topicPartitionName); - // messages_ is a blocking queue: if queue is already full then no need of lock as receiveAsync already - // gets available-msg and no need to put 
request in pendingReceives_ - Lock lock(pendingReceiveMutex_); - if (!pendingReceives_.empty()) { - ReceiveCallback callback = pendingReceives_.front(); - pendingReceives_.pop(); - lock.unlock(); - unAckedMessageTrackerPtr_->add(msg.getMessageId()); - listenerExecutor_->postWork(std::bind(callback, ResultOk, msg)); - } else { - if (messages_.full()) { - lock.unlock(); - } - messages_.push(msg); - if (messageListener_) { - unAckedMessageTrackerPtr_->add(msg.getMessageId()); - listenerExecutor_->postWork( - std::bind(&PartitionedConsumerImpl::internalListener, shared_from_this(), consumer)); - } - } -} - -void PartitionedConsumerImpl::failPendingReceiveCallback() { - Message msg; - Lock lock(pendingReceiveMutex_); - while (!pendingReceives_.empty()) { - ReceiveCallback callback = pendingReceives_.front(); - pendingReceives_.pop(); - listenerExecutor_->postWork(std::bind(callback, ResultAlreadyClosed, msg)); - } - lock.unlock(); -} - -void PartitionedConsumerImpl::internalListener(Consumer consumer) { - Message m; - messages_.pop(m); - try { - messageListener_(Consumer(shared_from_this()), m); - } catch (const std::exception& e) { - LOG_ERROR("Exception thrown from listener of Partitioned Consumer" << e.what()); - } -} - -void PartitionedConsumerImpl::receiveMessages() { - for (ConsumerList::const_iterator i = consumers_.begin(); i != consumers_.end(); i++) { - ConsumerImplPtr consumer = *i; - consumer->sendFlowPermitsToBroker(consumer->getCnx().lock(), conf_.getReceiverQueueSize()); - LOG_DEBUG("Sending FLOW command for consumer - " << consumer->getConsumerId()); - } -} - -Result PartitionedConsumerImpl::pauseMessageListener() { - if (!messageListener_) { - return ResultInvalidConfiguration; - } - for (ConsumerList::const_iterator i = consumers_.begin(); i != consumers_.end(); i++) { - (*i)->pauseMessageListener(); - } - return ResultOk; -} - -Result PartitionedConsumerImpl::resumeMessageListener() { - if (!messageListener_) { - return ResultInvalidConfiguration; - } 
- for (ConsumerList::const_iterator i = consumers_.begin(); i != consumers_.end(); i++) { - (*i)->resumeMessageListener(); - } - return ResultOk; -} - -void PartitionedConsumerImpl::redeliverUnacknowledgedMessages() { - LOG_DEBUG("Sending RedeliverUnacknowledgedMessages command for partitioned consumer."); - for (ConsumerList::const_iterator i = consumers_.begin(); i != consumers_.end(); i++) { - (*i)->redeliverUnacknowledgedMessages(); - } - unAckedMessageTrackerPtr_->clear(); -} - -void PartitionedConsumerImpl::redeliverUnacknowledgedMessages(const std::set& messageIds) { - if (messageIds.empty()) { - return; - } - if (conf_.getConsumerType() != ConsumerShared && conf_.getConsumerType() != ConsumerKeyShared) { - redeliverUnacknowledgedMessages(); - return; - } - LOG_DEBUG("Sending RedeliverUnacknowledgedMessages command for partitioned consumer."); - for (ConsumerList::const_iterator i = consumers_.begin(); i != consumers_.end(); i++) { - (*i)->redeliverUnacknowledgedMessages(messageIds); - } -} - -const std::string& PartitionedConsumerImpl::getName() const { return partitionStr_; } - -int PartitionedConsumerImpl::getNumOfPrefetchedMessages() const { return messages_.size(); } - -void PartitionedConsumerImpl::getBrokerConsumerStatsAsync(BrokerConsumerStatsCallback callback) { - Lock lock(mutex_); - if (state_ != Ready) { - lock.unlock(); - callback(ResultConsumerNotInitialized, BrokerConsumerStats()); - return; - } - const auto numPartitions = getNumPartitionsWithLock(); - PartitionedBrokerConsumerStatsPtr statsPtr = - std::make_shared(numPartitions); - LatchPtr latchPtr = std::make_shared(numPartitions); - ConsumerList consumerList = consumers_; - lock.unlock(); - for (int i = 0; i < consumerList.size(); i++) { - consumerList[i]->getBrokerConsumerStatsAsync( - std::bind(&PartitionedConsumerImpl::handleGetConsumerStats, shared_from_this(), - std::placeholders::_1, std::placeholders::_2, latchPtr, statsPtr, i, callback)); - } -} - -void 
PartitionedConsumerImpl::handleGetConsumerStats(Result res, BrokerConsumerStats brokerConsumerStats, - LatchPtr latchPtr, - PartitionedBrokerConsumerStatsPtr statsPtr, size_t index, - BrokerConsumerStatsCallback callback) { - Lock lock(mutex_); - if (res == ResultOk) { - latchPtr->countdown(); - statsPtr->add(brokerConsumerStats, index); - } else { - lock.unlock(); - callback(res, BrokerConsumerStats()); - return; - } - if (latchPtr->getCount() == 0) { - lock.unlock(); - callback(ResultOk, BrokerConsumerStats(statsPtr)); - } -} - -void PartitionedConsumerImpl::seekAsync(const MessageId& msgId, ResultCallback callback) { - callback(ResultOperationNotSupported); -} - -void PartitionedConsumerImpl::seekAsync(uint64_t timestamp, ResultCallback callback) { - Lock stateLock(mutex_); - if (state_ != Ready) { - stateLock.unlock(); - callback(ResultAlreadyClosed); - return; - } - - // consumers_ could only be modified when state_ is Ready, so we needn't lock consumersMutex_ here - ConsumerList consumerList = consumers_; - stateLock.unlock(); - - MultiResultCallback multiResultCallback(callback, consumers_.size()); - for (ConsumerList::const_iterator i = consumerList.begin(); i != consumerList.end(); i++) { - (*i)->seekAsync(timestamp, multiResultCallback); - } -} - -void PartitionedConsumerImpl::runPartitionUpdateTask() { - partitionsUpdateTimer_->expires_from_now(partitionsUpdateInterval_); - partitionsUpdateTimer_->async_wait( - std::bind(&PartitionedConsumerImpl::getPartitionMetadata, shared_from_this())); -} - -void PartitionedConsumerImpl::getPartitionMetadata() { - using namespace std::placeholders; - lookupServicePtr_->getPartitionMetadataAsync(topicName_) - .addListener(std::bind(&PartitionedConsumerImpl::handleGetPartitions, shared_from_this(), - std::placeholders::_1, std::placeholders::_2)); -} - -void PartitionedConsumerImpl::handleGetPartitions(Result result, - const LookupDataResultPtr& lookupDataResult) { - Lock stateLock(mutex_); - if (state_ != Ready) { - 
return; - } - - if (!result) { - const auto newNumPartitions = static_cast(lookupDataResult->getPartitions()); - Lock consumersLock(consumersMutex_); - const auto currentNumPartitions = getNumPartitions(); - assert(currentNumPartitions == consumers_.size()); - if (newNumPartitions > currentNumPartitions) { - LOG_INFO("new partition count: " << newNumPartitions); - numPartitions_ = newNumPartitions; - const auto config = getSinglePartitionConsumerConfig(); - for (unsigned int i = currentNumPartitions; i < newNumPartitions; i++) { - auto consumer = newInternalConsumer(i, config); - consumer->start(); - consumers_.push_back(consumer); - } - // `runPartitionUpdateTask()` will be called in `handleSinglePartitionConsumerCreated()` - return; - } - } else { - LOG_WARN("Failed to getPartitionMetadata: " << strResult(result)); - } - - runPartitionUpdateTask(); -} - -void PartitionedConsumerImpl::setNegativeAcknowledgeEnabledForTesting(bool enabled) { - Lock lock(mutex_); - for (auto&& c : consumers_) { - c->setNegativeAcknowledgeEnabledForTesting(enabled); - } -} - -bool PartitionedConsumerImpl::isConnected() const { - Lock stateLock(mutex_); - if (state_ != Ready) { - return false; - } - stateLock.unlock(); - - Lock consumersLock(consumersMutex_); - const auto consumers = consumers_; - consumersLock.unlock(); - for (const auto& consumer : consumers_) { - if (!consumer->isConnected()) { - return false; - } - } - return true; -} - -uint64_t PartitionedConsumerImpl::getNumberOfConnectedConsumer() { - uint64_t numberOfConnectedConsumer = 0; - Lock consumersLock(consumersMutex_); - const auto consumers = consumers_; - consumersLock.unlock(); - for (const auto& consumer : consumers) { - if (consumer->isConnected()) { - numberOfConnectedConsumer++; - } - } - return numberOfConnectedConsumer; -} - -} // namespace pulsar diff --git a/pulsar-client-cpp/lib/PartitionedConsumerImpl.h b/pulsar-client-cpp/lib/PartitionedConsumerImpl.h deleted file mode 100644 index 
7fa0ccdd1f4c9..0000000000000 --- a/pulsar-client-cpp/lib/PartitionedConsumerImpl.h +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#ifndef PULSAR_PARTITIONED_CONSUMER_HEADER -#define PULSAR_PARTITIONED_CONSUMER_HEADER -#include "lib/TestUtil.h" -#include "ConsumerImpl.h" -#include "ClientImpl.h" -#include -#include - -#include -#include "ConsumerImplBase.h" -#include "lib/UnAckedMessageTrackerDisabled.h" -#include -#include -#include - -namespace pulsar { -class PartitionedConsumerImpl; -class PartitionedConsumerImpl : public ConsumerImplBase, - public std::enable_shared_from_this { - public: - enum PartitionedConsumerState - { - Pending, - Ready, - Closing, - Closed, - Failed - }; - PartitionedConsumerImpl(ClientImplPtr client, const std::string& subscriptionName, - const TopicNamePtr topicName, const unsigned int numPartitions, - const ConsumerConfiguration& conf); - ~PartitionedConsumerImpl(); - // overrided methods from ConsumerImplBase - Future getConsumerCreatedFuture() override; - const std::string& getSubscriptionName() const override; - const std::string& getTopic() const override; - Result receive(Message& msg) override; - Result receive(Message& msg, int timeout) 
override; - void receiveAsync(ReceiveCallback& callback) override; - void unsubscribeAsync(ResultCallback callback) override; - void acknowledgeAsync(const MessageId& msgId, ResultCallback callback) override; - void acknowledgeCumulativeAsync(const MessageId& msgId, ResultCallback callback) override; - void closeAsync(ResultCallback callback) override; - void start() override; - void shutdown() override; - bool isClosed() override; - bool isOpen() override; - Result pauseMessageListener() override; - Result resumeMessageListener() override; - void redeliverUnacknowledgedMessages() override; - void redeliverUnacknowledgedMessages(const std::set& messageIds) override; - const std::string& getName() const override; - int getNumOfPrefetchedMessages() const override; - void getBrokerConsumerStatsAsync(BrokerConsumerStatsCallback callback) override; - void seekAsync(const MessageId& msgId, ResultCallback callback) override; - void seekAsync(uint64_t timestamp, ResultCallback callback) override; - void negativeAcknowledge(const MessageId& msgId) override; - bool isConnected() const override; - uint64_t getNumberOfConnectedConsumer() override; - - void handleGetConsumerStats(Result, BrokerConsumerStats, LatchPtr, PartitionedBrokerConsumerStatsPtr, - size_t, BrokerConsumerStatsCallback); - - private: - const ClientImplPtr client_; - const std::string subscriptionName_; - const TopicNamePtr topicName_; - unsigned int numPartitions_; - unsigned int numConsumersCreated_ = 0; - const ConsumerConfiguration conf_; - typedef std::vector ConsumerList; - ConsumerList consumers_; - // consumersMutex_ is used to share consumers_ and numPartitions_ - mutable std::mutex consumersMutex_; - mutable std::mutex mutex_; - std::mutex pendingReceiveMutex_; - PartitionedConsumerState state_ = Pending; - unsigned int unsubscribedSoFar_ = 0; - BlockingQueue messages_; - ExecutorServicePtr listenerExecutor_; - MessageListener messageListener_; - const std::string topic_; - const std::string name_; 
- const std::string partitionStr_; - ExecutorServicePtr internalListenerExecutor_; - DeadlineTimerPtr partitionsUpdateTimer_; - boost::posix_time::time_duration partitionsUpdateInterval_; - LookupServicePtr lookupServicePtr_; - - unsigned int getNumPartitions() const; - unsigned int getNumPartitionsWithLock() const; - ConsumerConfiguration getSinglePartitionConsumerConfig() const; - ConsumerImplPtr newInternalConsumer(unsigned int partition, const ConsumerConfiguration& config) const; - void setState(PartitionedConsumerState state); - void handleUnsubscribeAsync(Result result, unsigned int consumerIndex, ResultCallback callback); - void handleSinglePartitionConsumerCreated(Result result, ConsumerImplBaseWeakPtr consumerImplBaseWeakPtr, - unsigned int partitionIndex); - void handleSinglePartitionConsumerClose(Result result, unsigned int partitionIndex, - CloseCallback callback); - void notifyResult(CloseCallback closeCallback); - void messageReceived(Consumer consumer, const Message& msg); - void internalListener(Consumer consumer); - void receiveMessages(); - void failPendingReceiveCallback(); - void setNegativeAcknowledgeEnabledForTesting(bool enabled) override; - Promise partitionedConsumerCreatedPromise_; - UnAckedMessageTrackerPtr unAckedMessageTrackerPtr_; - std::queue pendingReceives_; - void runPartitionUpdateTask(); - void getPartitionMetadata(); - void handleGetPartitions(const Result result, const LookupDataResultPtr& lookupDataResult); - - friend class PulsarFriend; - - FRIEND_TEST(ConsumerTest, testPartitionedConsumerUnAckedMessageRedelivery); -}; -typedef std::weak_ptr PartitionedConsumerImplWeakPtr; -typedef std::shared_ptr PartitionedConsumerImplPtr; -} // namespace pulsar -#endif // PULSAR_PARTITIONED_CONSUMER_HEADER diff --git a/pulsar-client-cpp/lib/PartitionedProducerImpl.cc b/pulsar-client-cpp/lib/PartitionedProducerImpl.cc index bdd23ed6c91c2..9f197f0d45fd4 100644 --- a/pulsar-client-cpp/lib/PartitionedProducerImpl.cc +++ 
b/pulsar-client-cpp/lib/PartitionedProducerImpl.cc @@ -136,28 +136,29 @@ void PartitionedProducerImpl::handleSinglePartitionProducerCreated(Result result unsigned int partitionIndex) { // to indicate, we are doing cleanup using closeAsync after producer create // has failed and the invocation of closeAsync is not from client - CloseCallback closeCallback = NULL; - Lock lock(mutex_); + const auto numPartitions = getNumPartitionsWithLock(); + assert(numProducersCreated_ <= numPartitions && partitionIndex <= numPartitions); + if (state_ == Failed) { - // Ignore, we have already informed client that producer creation failed + // We have already informed client that producer creation failed + if (++numProducersCreated_ == numPartitions) { + closeAsync(nullptr); + } return; } - const auto numPartitions = getNumPartitionsWithLock(); - assert(numProducersCreated_ <= numPartitions); + if (result != ResultOk) { - state_ = Failed; - lock.unlock(); - closeAsync(closeCallback); - partitionedProducerCreatedPromise_.setFailed(result); LOG_ERROR("Unable to create Producer for partition - " << partitionIndex << " Error - " << result); + partitionedProducerCreatedPromise_.setFailed(result); + state_ = Failed; + if (++numProducersCreated_ == numPartitions) { + closeAsync(nullptr); + } return; } - assert(partitionIndex <= numPartitions); - numProducersCreated_++; - if (numProducersCreated_ == numPartitions) { + if (++numProducersCreated_ == numPartitions) { state_ = Ready; - lock.unlock(); if (partitionsUpdateTimer_) { runPartitionUpdateTask(); } @@ -181,7 +182,7 @@ void PartitionedProducerImpl::createLazyPartitionProducer(unsigned int partition // override void PartitionedProducerImpl::sendAsync(const Message& msg, SendCallback callback) { - if (!assertState(Ready)) { + if (state_ != Ready) { callback(ResultAlreadyClosed, msg.getMessageId()); return; } @@ -211,18 +212,7 @@ void PartitionedProducerImpl::sendAsync(const Message& msg, SendCallback callbac } // override -void 
PartitionedProducerImpl::shutdown() { setState(Closed); } - -void PartitionedProducerImpl::setState(const PartitionedProducerState state) { - Lock lock(mutex_); - state_ = state; - lock.unlock(); -} - -bool PartitionedProducerImpl::assertState(const PartitionedProducerState state) { - Lock lock(mutex_); - return state_ == state; -} +void PartitionedProducerImpl::shutdown() { state_ = Closed; } const std::string& PartitionedProducerImpl::getProducerName() const { Lock producersLock(producersMutex_); @@ -251,7 +241,10 @@ int64_t PartitionedProducerImpl::getLastSequenceId() const { * create one or many producers for partitions. So, we have to notify with ERROR on createProducerFailure */ void PartitionedProducerImpl::closeAsync(CloseCallback closeCallback) { - setState(Closing); + if (state_ == Closing || state_ == Closed) { + return; + } + state_ = Closing; unsigned int producerAlreadyClosed = 0; @@ -280,7 +273,7 @@ void PartitionedProducerImpl::closeAsync(CloseCallback closeCallback) { * handleSinglePartitionProducerCreated */ if (producerAlreadyClosed == numProducers && closeCallback) { - setState(Closed); + state_ = Closed; closeCallback(ResultOk); } } @@ -288,14 +281,12 @@ void PartitionedProducerImpl::closeAsync(CloseCallback closeCallback) { void PartitionedProducerImpl::handleSinglePartitionProducerClose(Result result, const unsigned int partitionIndex, CloseCallback callback) { - Lock lock(mutex_); if (state_ == Failed) { // we should have already notified the client by callback return; } if (result != ResultOk) { state_ = Failed; - lock.unlock(); LOG_ERROR("Closing the producer failed for partition - " << partitionIndex); if (callback) { callback(result); @@ -309,7 +300,6 @@ void PartitionedProducerImpl::handleSinglePartitionProducerClose(Result result, // closed all successfully if (!numProducersCreated_) { state_ = Closed; - lock.unlock(); // set the producerCreatedPromise to failure, if client called // closeAsync and it's not failure to create producer, 
the promise // is set second time here, first time it was successful. So check @@ -395,7 +385,6 @@ void PartitionedProducerImpl::getPartitionMetadata() { void PartitionedProducerImpl::handleGetPartitions(Result result, const LookupDataResultPtr& lookupDataResult) { - Lock stateLock(mutex_); if (state_ != Ready) { return; } @@ -428,11 +417,9 @@ void PartitionedProducerImpl::handleGetPartitions(Result result, } bool PartitionedProducerImpl::isConnected() const { - Lock stateLock(mutex_); if (state_ != Ready) { return false; } - stateLock.unlock(); Lock producersLock(producersMutex_); const auto producers = producers_; diff --git a/pulsar-client-cpp/lib/PartitionedProducerImpl.h b/pulsar-client-cpp/lib/PartitionedProducerImpl.h index 874d6cda5265e..0a8c10e221303 100644 --- a/pulsar-client-cpp/lib/PartitionedProducerImpl.h +++ b/pulsar-client-cpp/lib/PartitionedProducerImpl.h @@ -30,7 +30,7 @@ namespace pulsar { class PartitionedProducerImpl : public ProducerImplBase, public std::enable_shared_from_this { public: - enum PartitionedProducerState + enum State { Pending, Ready, @@ -73,8 +73,6 @@ class PartitionedProducerImpl : public ProducerImplBase, void notifyResult(CloseCallback closeCallback); - void setState(PartitionedProducerState state); - friend class PulsarFriend; private: @@ -83,7 +81,7 @@ class PartitionedProducerImpl : public ProducerImplBase, const TopicNamePtr topicName_; const std::string topic_; - unsigned int numProducersCreated_ = 0; + std::atomic_uint numProducersCreated_{0}; /* * set when one or more Single Partition Creation fails, close will cleanup and fail the create callbackxo @@ -99,10 +97,7 @@ class PartitionedProducerImpl : public ProducerImplBase, mutable std::mutex producersMutex_; MessageRoutingPolicyPtr routerPolicy_; - // mutex_ is used to share state_, and numProducersCreated_ - mutable std::mutex mutex_; - - PartitionedProducerState state_ = Pending; + std::atomic state_{Pending}; // only set this promise to value, when producers on 
all partitions are created. Promise partitionedProducerCreatedPromise_; @@ -124,7 +119,6 @@ class PartitionedProducerImpl : public ProducerImplBase, void runPartitionUpdateTask(); void getPartitionMetadata(); void handleGetPartitions(const Result result, const LookupDataResultPtr& partitionMetadata); - bool assertState(const PartitionedProducerState state); }; } // namespace pulsar diff --git a/pulsar-client-cpp/lib/PatternMultiTopicsConsumerImpl.cc b/pulsar-client-cpp/lib/PatternMultiTopicsConsumerImpl.cc index 34e912d4ddce0..79ed1969d7870 100644 --- a/pulsar-client-cpp/lib/PatternMultiTopicsConsumerImpl.cc +++ b/pulsar-client-cpp/lib/PatternMultiTopicsConsumerImpl.cc @@ -55,8 +55,9 @@ void PatternMultiTopicsConsumerImpl::autoDiscoveryTimerTask(const boost::system: return; } - if (state_ != Ready) { - LOG_ERROR("Error in autoDiscoveryTimerTask consumer state not ready: " << state_); + const auto state = state_.load(); + if (state != Ready) { + LOG_ERROR("Error in autoDiscoveryTimerTask consumer state not ready: " << state); resetAutoDiscoveryTimer(); return; } diff --git a/pulsar-client-cpp/lib/PeriodicTask.cc b/pulsar-client-cpp/lib/PeriodicTask.cc index 533d38b5efa94..4e91ef5f7e150 100644 --- a/pulsar-client-cpp/lib/PeriodicTask.cc +++ b/pulsar-client-cpp/lib/PeriodicTask.cc @@ -27,9 +27,14 @@ void PeriodicTask::start() { } state_ = Ready; if (periodMs_ >= 0) { - auto self = shared_from_this(); + std::weak_ptr weakSelf{shared_from_this()}; timer_.expires_from_now(boost::posix_time::millisec(periodMs_)); - timer_.async_wait([this, self](const ErrorCode& ec) { handleTimeout(ec); }); + timer_.async_wait([weakSelf](const ErrorCode& ec) { + auto self = weakSelf.lock(); + if (self) { + self->handleTimeout(ec); + } + }); } } diff --git a/pulsar-client-cpp/lib/ProducerImpl.cc b/pulsar-client-cpp/lib/ProducerImpl.cc index f81e205475de4..b61897bbf0189 100644 --- a/pulsar-client-cpp/lib/ProducerImpl.cc +++ b/pulsar-client-cpp/lib/ProducerImpl.cc @@ -56,7 +56,9 @@ 
ProducerImpl::ProducerImpl(ClientImplPtr client, const std::string& topic, const producerStr_("[" + topic_ + ", " + producerName_ + "] "), producerId_(client->newProducerId()), msgSequenceGenerator_(0), - dataKeyGenIntervalSec_(4 * 60 * 60), + batchTimer_(executor_->getIOService()), + sendTimer_(executor_->getIOService()), + dataKeyRefreshTask_(executor_->getIOService(), 4 * 60 * 60 * 1000), memoryLimitController_(client->getMemoryLimitController()) { LOG_DEBUG("ProducerName - " << producerName_ << " Created producer on topic " << topic_ << " id: " << producerId_); @@ -101,7 +103,6 @@ ProducerImpl::ProducerImpl(ClientImplPtr client, const std::string& topic, const LOG_ERROR("Unknown batching type: " << conf_.getBatchingType()); return; } - batchTimer_ = executor_->createDeadlineTimer(); } } @@ -122,27 +123,11 @@ int64_t ProducerImpl::getLastSequenceId() const { return lastSequenceIdPublished const std::string& ProducerImpl::getSchemaVersion() const { return schemaVersion_; } -void ProducerImpl::refreshEncryptionKey(const boost::system::error_code& ec) { - if (ec) { - LOG_DEBUG("Ignoring timer cancelled event, code[" << ec << "]"); - return; - } - - msgCrypto_->addPublicKeyCipher(conf_.getEncryptionKeys(), conf_.getCryptoKeyReader()); - - dataKeyGenTImer_->expires_from_now(boost::posix_time::seconds(dataKeyGenIntervalSec_)); - dataKeyGenTImer_->async_wait( - std::bind(&pulsar::ProducerImpl::refreshEncryptionKey, shared_from_this(), std::placeholders::_1)); -} - void ProducerImpl::connectionOpened(const ClientConnectionPtr& cnx) { - Lock lock(mutex_); if (state_ == Closed) { - lock.unlock(); LOG_DEBUG(getName() << "connectionOpened : Producer is already closed"); return; } - lock.unlock(); ClientImplPtr client = client_.lock(); int requestId = client->newRequestId(); @@ -164,7 +149,6 @@ void ProducerImpl::connectionFailed(Result result) { // so don't change the state and allow reconnections return; } else if (producerCreatedPromise_.setFailed(result)) { - Lock 
lock(mutex_); state_ = Failed; } } @@ -175,14 +159,15 @@ void ProducerImpl::handleCreateProducer(const ClientConnectionPtr& cnx, Result r // make sure we're still in the Pending/Ready state, closeAsync could have been invoked // while waiting for this response if using lazy producers - Lock lock(mutex_); - if (state_ != Ready && state_ != Pending) { + const auto state = state_.load(); + if (state != Ready && state != Pending) { LOG_DEBUG("Producer created response received but producer already closed"); failPendingMessages(ResultAlreadyClosed, false); return; } if (result == ResultOk) { + Lock lock(mutex_); // We are now reconnected to broker and clear to send messages. Re-send all pending messages and // set the cnx pointer so that new messages will be sent immediately LOG_INFO(getName() << "Created producer on broker " << cnx->cnxString()); @@ -202,11 +187,19 @@ void ProducerImpl::handleCreateProducer(const ClientConnectionPtr& cnx, Result r backoff_.reset(); lock.unlock(); - if (!dataKeyGenTImer_ && conf_.isEncryptionEnabled()) { - dataKeyGenTImer_ = executor_->createDeadlineTimer(); - dataKeyGenTImer_->expires_from_now(boost::posix_time::seconds(dataKeyGenIntervalSec_)); - dataKeyGenTImer_->async_wait(std::bind(&pulsar::ProducerImpl::refreshEncryptionKey, - shared_from_this(), std::placeholders::_1)); + if (conf_.isEncryptionEnabled()) { + auto weakSelf = weak_from_this(); + dataKeyRefreshTask_.setCallback([this, weakSelf](const PeriodicTask::ErrorCode& ec) { + auto self = weakSelf.lock(); + if (!self) { + return; + } + if (ec) { + LOG_ERROR("DataKeyRefresh timer failed: " << ec.message()); + return; + } + msgCrypto_->addPublicKeyCipher(conf_.getEncryptionKeys(), conf_.getCryptoKeyReader()); + }); } // if the producer is lazy the send timeout timer is already running @@ -217,8 +210,6 @@ void ProducerImpl::handleCreateProducer(const ClientConnectionPtr& cnx, Result r producerCreatedPromise_.setValue(shared_from_this()); } else { - lock.unlock(); - // Producer 
creation failed if (result == ResultTimeout) { // Creating the producer has timed out. We need to ensure the broker closes the producer @@ -248,7 +239,6 @@ void ProducerImpl::handleCreateProducer(const ClientConnectionPtr& cnx, Result r LOG_ERROR(getName() << "Failed to create producer: " << strResult(result)); failPendingMessages(result, true); producerCreatedPromise_.setFailed(result); - Lock lock(mutex_); state_ = Failed; } } @@ -268,13 +258,14 @@ std::shared_ptr ProducerImpl::getPendingCallback } if (batchMessageContainer_) { - OpSendMsg opSendMsg; - if (batchMessageContainer_->createOpSendMsg(opSendMsg) == ResultOk) { - callbacks->opSendMsgs.emplace_back(opSendMsg); - } - - releaseSemaphoreForSendOp(opSendMsg); - batchMessageContainer_->clear(); + batchMessageContainer_->processAndClear( + [this, &callbacks](Result result, const OpSendMsg& opSendMsg) { + if (result == ResultOk) { + callbacks->opSendMsgs.emplace_back(opSendMsg); + } + releaseSemaphoreForSendOp(opSendMsg); + }, + nullptr); } pendingMessagesQueue_.clear(); @@ -333,9 +324,8 @@ void ProducerImpl::statsCallBackHandler(Result res, const MessageId& msgId, Send void ProducerImpl::flushAsync(FlushCallback callback) { if (batchMessageContainer_) { - Lock lock(mutex_); - if (state_ == Ready) { + Lock lock(mutex_); auto failures = batchMessageAndSend(callback); lock.unlock(); failures.complete(); @@ -349,8 +339,8 @@ void ProducerImpl::flushAsync(FlushCallback callback) { void ProducerImpl::triggerFlush() { if (batchMessageContainer_) { - Lock lock(mutex_); if (state_ == Ready) { + Lock lock(mutex_); auto failures = batchMessageAndSend(); lock.unlock(); failures.complete(); @@ -443,10 +433,29 @@ void ProducerImpl::sendAsync(const Message& msg, SendCallback callback) { bool isFirstMessage = batchMessageContainer_->isFirstMessageToAdd(msg); bool isFull = batchMessageContainer_->add(msg, cb); if (isFirstMessage) { - batchTimer_->expires_from_now( + batchTimer_.expires_from_now( 
boost::posix_time::milliseconds(conf_.getBatchingMaxPublishDelayMs())); - batchTimer_->async_wait(std::bind(&ProducerImpl::batchMessageTimeoutHandler, shared_from_this(), - std::placeholders::_1)); + auto weakSelf = weak_from_this(); + batchTimer_.async_wait([this, weakSelf](const boost::system::error_code& ec) { + auto self = weakSelf.lock(); + if (!self) { + return; + } + if (ec) { + LOG_DEBUG(getName() << " Ignoring timer cancelled event, code[" << ec << "]"); + return; + } + LOG_DEBUG(getName() << " - Batch Message Timer expired"); + + // ignore if the producer is already closing/closed + const auto state = state_.load(); + if (state == Pending || state == Ready) { + Lock lock(mutex_); + auto failures = batchMessageAndSend(); + lock.unlock(); + failures.complete(); + } + }); } if (isFull) { @@ -505,17 +514,10 @@ void ProducerImpl::releaseSemaphoreForSendOp(const OpSendMsg& op) { PendingFailures ProducerImpl::batchMessageAndSend(const FlushCallback& flushCallback) { PendingFailures failures; LOG_DEBUG("batchMessageAndSend " << *batchMessageContainer_); - batchTimer_->cancel(); + batchTimer_.cancel(); - if (PULSAR_UNLIKELY(batchMessageContainer_->isEmpty())) { - if (flushCallback) { - flushCallback(ResultOk); - } - } else { - const size_t numBatches = batchMessageContainer_->getNumBatches(); - if (numBatches == 1) { - OpSendMsg opSendMsg; - Result result = batchMessageContainer_->createOpSendMsg(opSendMsg, flushCallback); + batchMessageContainer_->processAndClear( + [this, &failures](Result result, const OpSendMsg& opSendMsg) { if (result == ResultOk) { sendMessage(opSendMsg); } else { @@ -525,25 +527,8 @@ PendingFailures ProducerImpl::batchMessageAndSend(const FlushCallback& flushCall releaseSemaphoreForSendOp(opSendMsg); failures.add(std::bind(opSendMsg.sendCallback_, result, MessageId{})); } - } else if (numBatches > 1) { - std::vector opSendMsgs; - std::vector results = batchMessageContainer_->createOpSendMsgs(opSendMsgs, flushCallback); - for (size_t i = 0; 
i < results.size(); i++) { - if (results[i] == ResultOk) { - sendMessage(opSendMsgs[i]); - } else { - // A spot has been reserved for this batch, but the batch failed to be pushed to the - // queue, so we need to release the spot manually - LOG_ERROR("batchMessageAndSend | Failed to createOpSendMsgs[" << i - << "]: " << results[i]); - releaseSemaphoreForSendOp(opSendMsgs[i]); - failures.add(std::bind(opSendMsgs[i].sendCallback_, results[i], MessageId{})); - } - } - } // else numBatches is 0, do nothing - } - - batchMessageContainer_->clear(); + }, + flushCallback); return failures; } @@ -566,22 +551,6 @@ void ProducerImpl::sendMessage(const OpSendMsg& op) { } } -void ProducerImpl::batchMessageTimeoutHandler(const boost::system::error_code& ec) { - if (ec) { - LOG_DEBUG(getName() << " Ignoring timer cancelled event, code[" << ec << "]"); - return; - } - LOG_DEBUG(getName() << " - Batch Message Timer expired"); - - // ignore if the producer is already closing/closed - Lock lock(mutex_); - if (state_ == Pending || state_ == Ready) { - auto failures = batchMessageAndSend(); - lock.unlock(); - failures.complete(); - } -} - void ProducerImpl::printStats() { if (batchMessageContainer_) { LOG_INFO("Producer - " << producerStr_ << ", [batchMessageContainer = " << *batchMessageContainer_ @@ -592,11 +561,9 @@ void ProducerImpl::printStats() { } void ProducerImpl::closeAsync(CloseCallback callback) { - Lock lock(mutex_); - // if the producer was never started then there is nothing to clean up - if (state_ == NotStarted) { - state_ = Closed; + State expectedState = NotStarted; + if (state_.compare_exchange_strong(expectedState, Closed)) { callback(ResultOk); return; } @@ -609,9 +576,11 @@ void ProducerImpl::closeAsync(CloseCallback callback) { // ensure any remaining send callbacks are called before calling the close callback failPendingMessages(ResultAlreadyClosed, false); - if (state_ != Ready && state_ != Pending) { + // TODO maybe we need a loop here to implement CAS for a 
condition, + // just like Java's `getAndUpdate` method on an atomic variable + const auto state = state_.load(); + if (state != Ready && state != Pending) { state_ = Closed; - lock.unlock(); if (callback) { callback(ResultAlreadyClosed); } @@ -624,7 +593,7 @@ void ProducerImpl::closeAsync(CloseCallback callback) { ClientConnectionPtr cnx = getCnx().lock(); if (!cnx) { state_ = Closed; - lock.unlock(); + if (callback) { callback(ResultOk); } @@ -638,7 +607,6 @@ void ProducerImpl::closeAsync(CloseCallback callback) { ClientImplPtr client = client_.lock(); if (!client) { state_ = Closed; - lock.unlock(); // Client was already destroyed if (callback) { callback(ResultOk); @@ -646,7 +614,6 @@ void ProducerImpl::closeAsync(CloseCallback callback) { return; } - lock.unlock(); int requestId = client->newRequestId(); Future future = cnx->sendRequestWithId(Commands::newCloseProducer(producerId_, requestId), requestId); @@ -659,7 +626,6 @@ void ProducerImpl::closeAsync(CloseCallback callback) { void ProducerImpl::handleClose(Result result, ResultCallback callback, ProducerImplPtr producer) { if (result == ResultOk) { - Lock lock(mutex_); state_ = Closed; LOG_INFO(getName() << "Closed producer"); ClientConnectionPtr cnx = getCnx().lock(); @@ -682,10 +648,11 @@ Future ProducerImpl::getProducerCreatedFuture() uint64_t ProducerImpl::getProducerId() const { return producerId_; } void ProducerImpl::handleSendTimeout(const boost::system::error_code& err) { - Lock lock(mutex_); - if (state_ != Pending && state_ != Ready) { + const auto state = state_.load(); + if (state != Pending && state != Ready) { return; } + Lock lock(mutex_); if (err == boost::asio::error::operation_aborted) { LOG_DEBUG(getName() << "Timer cancelled: " << err.message()); @@ -698,8 +665,8 @@ void ProducerImpl::handleSendTimeout(const boost::system::error_code& err) { std::shared_ptr pendingCallbacks; if (pendingMessagesQueue_.empty()) { // If there are no pending messages, reset the timeout to the configured 
value. - sendTimer_->expires_from_now(milliseconds(conf_.getSendTimeout())); LOG_DEBUG(getName() << "Producer timeout triggered on empty pending message queue"); + asyncWaitSendTimeout(milliseconds(conf_.getSendTimeout())); } else { // If there is at least one message, calculate the diff between the message timeout and // the current time. @@ -709,17 +676,14 @@ void ProducerImpl::handleSendTimeout(const boost::system::error_code& err) { LOG_DEBUG(getName() << "Timer expired. Calling timeout callbacks."); pendingCallbacks = getPendingCallbacksWhenFailed(); // Since the pending queue is cleared now, set timer to expire after configured value. - sendTimer_->expires_from_now(milliseconds(conf_.getSendTimeout())); + asyncWaitSendTimeout(milliseconds(conf_.getSendTimeout())); } else { // The diff is greater than zero, set the timeout to the diff value LOG_DEBUG(getName() << "Timer hasn't expired yet, setting new timeout " << diff); - sendTimer_->expires_from_now(diff); + asyncWaitSendTimeout(diff); } } - // Asynchronously wait for the timeout to trigger - sendTimer_->async_wait( - std::bind(&ProducerImpl::handleSendTimeout, shared_from_this(), std::placeholders::_1)); lock.unlock(); if (pendingCallbacks) { pendingCallbacks->complete(ResultTimeout); @@ -843,53 +807,41 @@ void ProducerImpl::shutdown() { } void ProducerImpl::cancelTimers() { - if (dataKeyGenTImer_) { - dataKeyGenTImer_->cancel(); - dataKeyGenTImer_.reset(); - } - - if (batchTimer_) { - batchTimer_->cancel(); - batchTimer_.reset(); - } - - if (sendTimer_) { - sendTimer_->cancel(); - sendTimer_.reset(); - } + dataKeyRefreshTask_.stop(); + batchTimer_.cancel(); + sendTimer_.cancel(); } bool ProducerImplCmp::operator()(const ProducerImplPtr& a, const ProducerImplPtr& b) const { return a->getProducerId() < b->getProducerId(); } -bool ProducerImpl::isClosed() { - Lock lock(mutex_); - return state_ == Closed; -} +bool ProducerImpl::isClosed() { return state_ == Closed; } -bool ProducerImpl::isConnected() const { - 
Lock lock(mutex_); - return !getCnx().expired() && state_ == Ready; -} +bool ProducerImpl::isConnected() const { return !getCnx().expired() && state_ == Ready; } uint64_t ProducerImpl::getNumberOfConnectedProducer() { return isConnected() ? 1 : 0; } -bool ProducerImpl::isStarted() const { - Lock lock(mutex_); - return state_ != NotStarted; -} +bool ProducerImpl::isStarted() const { return state_ != NotStarted; } void ProducerImpl::startSendTimeoutTimer() { - // Initialize the sendTimer only once per producer and only when producer timeout is - // configured. Set the timeout as configured value and asynchronously wait for the - // timeout to happen. - if (!sendTimer_ && conf_.getSendTimeout() > 0) { - sendTimer_ = executor_->createDeadlineTimer(); - sendTimer_->expires_from_now(milliseconds(conf_.getSendTimeout())); - sendTimer_->async_wait( - std::bind(&ProducerImpl::handleSendTimeout, shared_from_this(), std::placeholders::_1)); + if (conf_.getSendTimeout() > 0) { + asyncWaitSendTimeout(milliseconds(conf_.getSendTimeout())); } } +void ProducerImpl::asyncWaitSendTimeout(DurationType expiryTime) { + sendTimer_.expires_from_now(expiryTime); + + auto weakSelf = weak_from_this(); + sendTimer_.async_wait([weakSelf](const boost::system::error_code& err) { + auto self = weakSelf.lock(); + if (self) { + std::static_pointer_cast(self)->handleSendTimeout(err); + } + }); +} + +ProducerImplWeakPtr ProducerImpl::weak_from_this() noexcept { return shared_from_this(); } + } // namespace pulsar /* namespace pulsar */ diff --git a/pulsar-client-cpp/lib/ProducerImpl.h b/pulsar-client-cpp/lib/ProducerImpl.h index d29efed1a13ae..3db10533a1286 100644 --- a/pulsar-client-cpp/lib/ProducerImpl.h +++ b/pulsar-client-cpp/lib/ProducerImpl.h @@ -35,6 +35,7 @@ #include "BatchMessageContainerBase.h" #include "PendingFailures.h" #include "Semaphore.h" +#include "PeriodicTask.h" using namespace pulsar; @@ -83,6 +84,9 @@ class ProducerImpl : public HandlerBase, int32_t partition() const noexcept { 
return partition_; } + // NOTE: this method is introduced into `enable_shared_from_this` since C++17 + ProducerImplWeakPtr weak_from_this() noexcept; + protected: ProducerStatsBasePtr producerStatsBasePtr_; @@ -92,8 +96,6 @@ class ProducerImpl : public HandlerBase, void sendMessage(const OpSendMsg& opSendMsg); - void batchMessageTimeoutHandler(const boost::system::error_code& ec); - void startSendTimeoutTimer(); friend class PulsarFriend; @@ -147,14 +149,16 @@ class ProducerImpl : public HandlerBase, proto::BaseCommand cmd_; std::unique_ptr batchMessageContainer_; - DeadlineTimerPtr batchTimer_; + boost::asio::deadline_timer batchTimer_; PendingFailures batchMessageAndSend(const FlushCallback& flushCallback = nullptr); volatile int64_t lastSequenceIdPublished_; std::string schemaVersion_; - DeadlineTimerPtr sendTimer_; + boost::asio::deadline_timer sendTimer_; void handleSendTimeout(const boost::system::error_code& err); + using DurationType = typename boost::asio::deadline_timer::duration_type; + void asyncWaitSendTimeout(DurationType expiryTime); Promise producerCreatedPromise_; @@ -165,8 +169,7 @@ class ProducerImpl : public HandlerBase, void failPendingMessages(Result result, bool withLock); MessageCryptoPtr msgCrypto_; - DeadlineTimerPtr dataKeyGenTImer_; - uint32_t dataKeyGenIntervalSec_; + PeriodicTask dataKeyRefreshTask_; MemoryLimitController& memoryLimitController_; }; diff --git a/pulsar-client-cpp/lib/ReaderImpl.cc b/pulsar-client-cpp/lib/ReaderImpl.cc index 48f5d5866f453..5f78068228ff4 100644 --- a/pulsar-client-cpp/lib/ReaderImpl.cc +++ b/pulsar-client-cpp/lib/ReaderImpl.cc @@ -35,7 +35,8 @@ ReaderImpl::ReaderImpl(const ClientImplPtr client, const std::string& topic, con const ExecutorServicePtr listenerExecutor, ReaderCallback readerCreatedCallback) : topic_(topic), client_(client), readerConf_(conf), readerCreatedCallback_(readerCreatedCallback) {} -void ReaderImpl::start(const MessageId& startMessageId) { +void ReaderImpl::start(const MessageId& 
startMessageId, + std::function callback) { ConsumerConfiguration consumerConf; consumerConf.setConsumerType(ConsumerExclusive); consumerConf.setReceiverQueueSize(readerConf_.getReceiverQueueSize()); @@ -79,22 +80,21 @@ void ReaderImpl::start(const MessageId& startMessageId) { client_.lock(), topic_, subscription, consumerConf, ExecutorServicePtr(), false, NonPartitioned, Commands::SubscriptionModeNonDurable, Optional::of(startMessageId)); consumer_->setPartitionIndex(TopicName::getPartitionIndex(topic_)); - consumer_->getConsumerCreatedFuture().addListener(std::bind(&ReaderImpl::handleConsumerCreated, - shared_from_this(), std::placeholders::_1, - std::placeholders::_2)); + auto self = shared_from_this(); + consumer_->getConsumerCreatedFuture().addListener( + [this, self, callback](Result result, const ConsumerImplBaseWeakPtr& weakConsumerPtr) { + if (result == ResultOk) { + callback(weakConsumerPtr); + readerCreatedCallback_(result, Reader(self)); + } else { + readerCreatedCallback_(result, {}); + } + }); consumer_->start(); } const std::string& ReaderImpl::getTopic() const { return consumer_->getTopic(); } -void ReaderImpl::handleConsumerCreated(Result result, ConsumerImplBaseWeakPtr consumer) { - auto self = shared_from_this(); - readerCreatedCallback_(result, Reader(self)); - readerImplWeakPtr_ = self; -} - -ConsumerImplPtr ReaderImpl::getConsumer() { return consumer_; } - Result ReaderImpl::readNext(Message& msg) { Result res = consumer_->receive(msg); acknowledgeIfNecessary(res, msg); @@ -139,11 +139,11 @@ void ReaderImpl::seekAsync(uint64_t timestamp, ResultCallback callback) { } void ReaderImpl::getLastMessageIdAsync(GetLastMessageIdCallback callback) { - consumer_->getLastMessageIdAsync(callback); + consumer_->getLastMessageIdAsync([callback](Result result, const GetLastMessageIdResponse& response) { + callback(result, response.getLastMessageId()); + }); } -ReaderImplWeakPtr ReaderImpl::getReaderImplWeakPtr() { return readerImplWeakPtr_; } - bool 
ReaderImpl::isConnected() const { return consumer_->isConnected(); } } // namespace pulsar diff --git a/pulsar-client-cpp/lib/ReaderImpl.h b/pulsar-client-cpp/lib/ReaderImpl.h index a546ae87e790d..b0d8a6bc40a21 100644 --- a/pulsar-client-cpp/lib/ReaderImpl.h +++ b/pulsar-client-cpp/lib/ReaderImpl.h @@ -42,7 +42,7 @@ class PULSAR_PUBLIC ReaderImpl : public std::enable_shared_from_this ReaderImpl(const ClientImplPtr client, const std::string& topic, const ReaderConfiguration& conf, const ExecutorServicePtr listenerExecutor, ReaderCallback readerCreatedCallback); - void start(const MessageId& startMessageId); + void start(const MessageId& startMessageId, std::function callback); const std::string& getTopic() const; @@ -53,7 +53,7 @@ class PULSAR_PUBLIC ReaderImpl : public std::enable_shared_from_this Future getReaderCreatedFuture(); - ConsumerImplPtr getConsumer(); + ConsumerImplBaseWeakPtr getConsumer() const noexcept { return consumer_; } void hasMessageAvailableAsync(HasMessageAvailableCallback callback); @@ -62,13 +62,9 @@ class PULSAR_PUBLIC ReaderImpl : public std::enable_shared_from_this void getLastMessageIdAsync(GetLastMessageIdCallback callback); - ReaderImplWeakPtr getReaderImplWeakPtr(); - bool isConnected() const; private: - void handleConsumerCreated(Result result, ConsumerImplBaseWeakPtr consumer); - void messageListener(Consumer consumer, const Message& msg); void acknowledgeIfNecessary(Result result, const Message& msg); @@ -79,7 +75,6 @@ class PULSAR_PUBLIC ReaderImpl : public std::enable_shared_from_this ConsumerImplPtr consumer_; ReaderCallback readerCreatedCallback_; ReaderListener readerListener_; - ReaderImplWeakPtr readerImplWeakPtr_; }; } // namespace pulsar diff --git a/pulsar-client-cpp/lib/SynchronizedHashMap.h b/pulsar-client-cpp/lib/SynchronizedHashMap.h new file mode 100644 index 0000000000000..184ca6a283623 --- /dev/null +++ b/pulsar-client-cpp/lib/SynchronizedHashMap.h @@ -0,0 +1,138 @@ +/** + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +#pragma once + +#include +#include +#include +#include +#include +#include "Utils.h" + +namespace pulsar { + +// V must be default constructible and copyable +template +class SynchronizedHashMap { + using MutexType = std::recursive_mutex; + using Lock = std::lock_guard; + + public: + using OptValue = Optional; + using PairVector = std::vector>; + + SynchronizedHashMap() = default; + + SynchronizedHashMap(const PairVector& pairs) { + for (auto&& kv : pairs) { + data_.emplace(kv.first, kv.second); + } + } + + template + void emplace(Args&&... 
args) { + Lock lock(mutex_); + data_.emplace(std::forward(args)...); + } + + void forEach(std::function f) const { + Lock lock(mutex_); + for (const auto& kv : data_) { + f(kv.first, kv.second); + } + } + + void forEachValue(std::function f) const { + Lock lock(mutex_); + for (const auto& kv : data_) { + f(kv.second); + } + } + + void clear() { + Lock lock(mutex_); + data_.clear(); + } + + // clear the map and apply `f` on each removed value + void clear(std::function f) { + Lock lock(mutex_); + auto it = data_.begin(); + while (it != data_.end()) { + f(it->first, it->second); + auto next = data_.erase(it); + it = next; + } + } + + OptValue find(const K& key) const { + Lock lock(mutex_); + auto it = data_.find(key); + if (it != data_.end()) { + return OptValue::of(it->second); + } else { + return OptValue::empty(); + } + } + + OptValue findFirstValueIf(std::function f) const { + Lock lock(mutex_); + for (const auto& kv : data_) { + if (f(kv.second)) { + return OptValue::of(kv.second); + } + } + return OptValue::empty(); + } + + OptValue remove(const K& key) { + Lock lock(mutex_); + auto it = data_.find(key); + if (it != data_.end()) { + auto result = OptValue::of(it->second); + data_.erase(it); + return result; + } else { + return OptValue::empty(); + } + } + + // This method is only used for test + PairVector toPairVector() const { + Lock lock(mutex_); + PairVector pairs; + for (auto&& kv : data_) { + pairs.emplace_back(kv); + } + return pairs; + } + + // This method is only used for test + size_t size() const noexcept { + Lock lock(mutex_); + return data_.size(); + } + + private: + std::unordered_map data_; + // Use recursive_mutex to allow methods being called in `forEach` + mutable MutexType mutex_; +}; + +} // namespace pulsar diff --git a/pulsar-client-cpp/lib/TimeUtils.h b/pulsar-client-cpp/lib/TimeUtils.h index 1da7d65923a9f..45157ae855b98 100644 --- a/pulsar-client-cpp/lib/TimeUtils.h +++ b/pulsar-client-cpp/lib/TimeUtils.h @@ -19,6 +19,8 @@ #pragma once 
#include +#include +#include #include @@ -33,4 +35,50 @@ class PULSAR_PUBLIC TimeUtils { static ptime now(); static int64_t currentTimeMillis(); }; + +// This class processes a timeout with the following semantics: +// > 0: wait at most the timeout until a blocking operation completes +// == 0: do not wait the blocking operation +// < 0: wait infinitely until a blocking operation completes. +// +// Here is a simple example usage: +// +// ```c++ +// // Wait at most 300 milliseconds +// TimeoutProcessor timeoutProcessor{300}; +// while (!allOperationsAreDone()) { +// timeoutProcessor.tik(); +// // This method may block for some time +// performBlockingOperation(timeoutProcessor.getLeftTimeout()); +// timeoutProcessor.tok(); +// } +// ``` +// +// The template argument is the same as std::chrono::duration. +template +class TimeoutProcessor { + public: + using Clock = std::chrono::high_resolution_clock; + + TimeoutProcessor(long timeout) : leftTimeout_(timeout) {} + + long getLeftTimeout() const noexcept { return leftTimeout_; } + + void tik() { before_ = Clock::now(); } + + void tok() { + if (leftTimeout_ > 0) { + leftTimeout_ -= std::chrono::duration_cast(Clock::now() - before_).count(); + if (leftTimeout_ <= 0) { + // The timeout exceeds, getLeftTimeout() will return 0 to indicate we should not wait more + leftTimeout_ = 0; + } + } + } + + private: + std::atomic_long leftTimeout_; + std::chrono::time_point before_; +}; + } // namespace pulsar diff --git a/pulsar-client-cpp/lib/VersionInternal.h b/pulsar-client-cpp/lib/VersionInternal.h new file mode 100644 index 0000000000000..c2560352692dd --- /dev/null +++ b/pulsar-client-cpp/lib/VersionInternal.h @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +#ifndef LIB_VERSION_INTERNAL_H_ +#define LIB_VERSION_INTERNAL_H_ + +#ifndef _PULSAR_VERSION_INTERNAL_ +#define _PULSAR_VERSION_INTERNAL_ "unknown" +#endif + +#endif /* LIB_VERSION_INTERNAL_H_ */ diff --git a/pulsar-client-cpp/lib/auth/AuthOauth2.cc b/pulsar-client-cpp/lib/auth/AuthOauth2.cc index 87a217e818f87..31225b15a71c5 100644 --- a/pulsar-client-cpp/lib/auth/AuthOauth2.cc +++ b/pulsar-client-cpp/lib/auth/AuthOauth2.cc @@ -23,7 +23,6 @@ #include #include #include -#include #include DECLARE_LOG_OBJECT() @@ -86,22 +85,12 @@ CachedToken::~CachedToken() {} // Oauth2CachedToken -static int64_t currentTimeMillis() { - using namespace boost::posix_time; - using boost::posix_time::milliseconds; - using boost::posix_time::seconds; - static ptime time_t_epoch(boost::gregorian::date(1970, 1, 1)); - - time_duration diff = microsec_clock::universal_time() - time_t_epoch; - return diff.total_milliseconds(); -} - Oauth2CachedToken::Oauth2CachedToken(Oauth2TokenResultPtr token) { latest_ = token; int64_t expiredIn = token->getExpiresIn(); if (expiredIn > 0) { - expiresAt_ = expiredIn + currentTimeMillis(); + expiresAt_ = Clock::now() + std::chrono::seconds(expiredIn); } else { throw std::runtime_error("ExpiresIn in Oauth2TokenResult invalid value: " + std::to_string(expiredIn)); @@ -113,7 +102,7 @@ AuthenticationDataPtr Oauth2CachedToken::getAuthData() { return authData_; } 
Oauth2CachedToken::~Oauth2CachedToken() {} -bool Oauth2CachedToken::isExpired() { return expiresAt_ < currentTimeMillis(); } +bool Oauth2CachedToken::isExpired() { return expiresAt_ < Clock::now(); } // OauthFlow @@ -154,6 +143,8 @@ ClientCredentialFlow::ClientCredentialFlow(ParamMap& params) audience_(params["audience"]), scope_(params["scope"]) {} +std::string ClientCredentialFlow::getTokenEndPoint() const { return tokenEndPoint_; } + static size_t curlWriteCallback(void* contents, size_t size, size_t nmemb, void* responseDataPtr) { ((std::string*)responseDataPtr)->append((char*)contents, size * nmemb); return size * nmemb; @@ -179,7 +170,12 @@ void ClientCredentialFlow::initialize() { curl_easy_setopt(handle, CURLOPT_CUSTOMREQUEST, "GET"); // set URL: well-know endpoint - curl_easy_setopt(handle, CURLOPT_URL, (issuerUrl_ + "/.well-known/openid-configuration").c_str()); + std::string wellKnownUrl = issuerUrl_; + if (wellKnownUrl.back() == '/') { + wellKnownUrl.pop_back(); + } + wellKnownUrl.append("/.well-known/openid-configuration"); + curl_easy_setopt(handle, CURLOPT_URL, wellKnownUrl.c_str()); // Write callback curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, curlWriteCallback); @@ -190,8 +186,6 @@ void ClientCredentialFlow::initialize() { curl_easy_setopt(handle, CURLOPT_FORBID_REUSE, 1L); curl_easy_setopt(handle, CURLOPT_FOLLOWLOCATION, 1L); - curl_easy_setopt(handle, CURLOPT_SSL_VERIFYPEER, 0L); - curl_easy_setopt(handle, CURLOPT_SSL_VERIFYHOST, 0L); char errorBuffer[CURL_ERROR_SIZE]; curl_easy_setopt(handle, CURLOPT_ERRORBUFFER, errorBuffer); @@ -315,8 +309,6 @@ Oauth2TokenResultPtr ClientCredentialFlow::authenticate() { curl_easy_setopt(handle, CURLOPT_FORBID_REUSE, 1L); curl_easy_setopt(handle, CURLOPT_FOLLOWLOCATION, 1L); - curl_easy_setopt(handle, CURLOPT_SSL_VERIFYPEER, 0L); - curl_easy_setopt(handle, CURLOPT_SSL_VERIFYHOST, 0L); curl_easy_setopt(handle, CURLOPT_POSTFIELDS, postData.c_str()); diff --git a/pulsar-client-cpp/lib/auth/AuthOauth2.h 
b/pulsar-client-cpp/lib/auth/AuthOauth2.h index b1a5ec63a9424..986919ddfcd9c 100644 --- a/pulsar-client-cpp/lib/auth/AuthOauth2.h +++ b/pulsar-client-cpp/lib/auth/AuthOauth2.h @@ -20,6 +20,7 @@ #pragma once #include +#include #include namespace pulsar { @@ -56,6 +57,7 @@ class ClientCredentialFlow : public Oauth2Flow { void close(); ParamMap generateParamMap() const; + std::string getTokenEndPoint() const; private: std::string tokenEndPoint_; @@ -67,13 +69,15 @@ class ClientCredentialFlow : public Oauth2Flow { class Oauth2CachedToken : public CachedToken { public: + using Clock = std::chrono::high_resolution_clock; + Oauth2CachedToken(Oauth2TokenResultPtr token); ~Oauth2CachedToken(); bool isExpired(); AuthenticationDataPtr getAuthData(); private: - int64_t expiresAt_; + std::chrono::time_point expiresAt_; Oauth2TokenResultPtr latest_; AuthenticationDataPtr authData_; }; diff --git a/pulsar-client-cpp/lib/c/c_ConsumerConfiguration.cc b/pulsar-client-cpp/lib/c/c_ConsumerConfiguration.cc index 90c60dfc991ad..aaec12cff6f8d 100644 --- a/pulsar-client-cpp/lib/c/c_ConsumerConfiguration.cc +++ b/pulsar-client-cpp/lib/c/c_ConsumerConfiguration.cc @@ -185,3 +185,13 @@ int pulsar_consumer_get_subscription_initial_position( pulsar_consumer_configuration_t *consumer_configuration) { return consumer_configuration->consumerConfiguration.getSubscriptionInitialPosition(); } + +void pulsar_consumer_configuration_set_priority_level(pulsar_consumer_configuration_t *consumer_configuration, + int priority_level) { + consumer_configuration->consumerConfiguration.setPriorityLevel(priority_level); +} + +int pulsar_consumer_configuration_get_priority_level( + pulsar_consumer_configuration_t *consumer_configuration) { + return consumer_configuration->consumerConfiguration.getPriorityLevel(); +} diff --git a/pulsar-client-cpp/lib/c/c_ProducerConfiguration.cc b/pulsar-client-cpp/lib/c/c_ProducerConfiguration.cc index f26f63a593b08..906a4d8230c41 100644 --- 
a/pulsar-client-cpp/lib/c/c_ProducerConfiguration.cc +++ b/pulsar-client-cpp/lib/c/c_ProducerConfiguration.cc @@ -209,7 +209,7 @@ void pulsar_producer_configuration_set_default_crypto_key_reader(pulsar_producer conf->conf.setCryptoKeyReader(keyReader); } -pulsar_producer_crypto_failure_action pulsar_producer_configuration_set_crypto_failure_action( +pulsar_producer_crypto_failure_action pulsar_producer_configuration_get_crypto_failure_action( pulsar_producer_configuration_t *conf) { return (pulsar_producer_crypto_failure_action)conf->conf.getCryptoFailureAction(); } diff --git a/pulsar-client-cpp/lib/checksum/crc32c_sse42.cc b/pulsar-client-cpp/lib/checksum/crc32c_sse42.cc index 1ed88e888e39c..f866480f09ea4 100644 --- a/pulsar-client-cpp/lib/checksum/crc32c_sse42.cc +++ b/pulsar-client-cpp/lib/checksum/crc32c_sse42.cc @@ -31,7 +31,8 @@ #include "lib/checksum/crc32c_sw.h" #include "gf2.hpp" -#if BOOST_ARCH_X86_64 +#if BOOST_ARCH_X86_64 && !defined(__arm64__) +#define PULSAR_X86_64 #include // SSE4.2 #include // PCLMUL #else @@ -44,7 +45,7 @@ #ifdef _MSC_VER #include -#elif BOOST_ARCH_X86_64 +#elif defined(PULSAR_X86_64) #include #endif @@ -79,7 +80,7 @@ bool crc32c_initialize() { __cpuid(CPUInfo, 1); has_sse42 = (CPUInfo[2] & cpuid_ecx_sse42) != 0; has_pclmulqdq = (CPUInfo[2] & cpuid_ecx_pclmulqdq) != 0; -#elif BOOST_ARCH_X86_64 +#elif defined(PULSAR_X86_64) const uint32_t cpuid_ecx_sse42 = (1 << 20); const uint32_t cpuid_ecx_pclmulqdq = (1 << 1); unsigned int eax, ebx, ecx, edx; @@ -116,7 +117,7 @@ void chunk_config::make_shift_table(size_t bytes, uint32_t table[256]) { for (unsigned int i = 0; i < 256; ++i) table[i] = (const bitvector<32>)mul(m, bitvector<32>(i)); } -#if BOOST_ARCH_X86_64 +#ifdef PULSAR_X86_64 static uint32_t crc32c_chunk(uint32_t crc, const void *buf, const chunk_config &config) { DEBUG_PRINTF3(" crc32c_chunk(crc = 0x%08x, buf = %p, config.words = " SIZE_T_FORMAT ")", crc, buf, @@ -259,7 +260,7 @@ uint32_t crc32c(uint32_t init, const void *buf, 
size_t len, const chunk_config * return crc; } -#else // ! BOOST_ARCH_X86_64 +#else // ! PULSAR_X86_64 uint32_t crc32c(uint32_t init, const void *buf, size_t len, const chunk_config *config) { // SSE 4.2 extension for hw implementation are not present diff --git a/pulsar-client-cpp/lib/stats/ProducerStatsImpl.cc b/pulsar-client-cpp/lib/stats/ProducerStatsImpl.cc index af7ae4b9c0440..a3608e5701720 100644 --- a/pulsar-client-cpp/lib/stats/ProducerStatsImpl.cc +++ b/pulsar-client-cpp/lib/stats/ProducerStatsImpl.cc @@ -26,7 +26,7 @@ namespace pulsar { DECLARE_LOG_OBJECT(); -static const std::array probs = {0.5, 0.9, 0.99, 0.999}; +static const std::array probs = {{0.5, 0.9, 0.99, 0.999}}; std::string ProducerStatsImpl::latencyToString(const LatencyAccumulator& obj) { boost::accumulators::detail::extractor_result< diff --git a/pulsar-client-cpp/pkg/deb/Dockerfile b/pulsar-client-cpp/pkg/deb/Dockerfile index 77c3f470c068f..1925827b56ac4 100644 --- a/pulsar-client-cpp/pkg/deb/Dockerfile +++ b/pulsar-client-cpp/pkg/deb/Dockerfile @@ -49,12 +49,12 @@ RUN curl -O -L https://github.com/google/protobuf/releases/download/v3.3.0/prot rm -rf /protobuf-cpp-3.3.0.tar.gz /protobuf-3.3.0 # ZLib -RUN curl -O -L https://github.com/madler/zlib/archive/v1.2.11.tar.gz && \ - tar xvfz v1.2.11.tar.gz && \ - cd zlib-1.2.11 && \ +RUN curl -O -L https://github.com/madler/zlib/archive/v1.2.12.tar.gz && \ + tar xvfz v1.2.12.tar.gz && \ + cd zlib-1.2.12 && \ CFLAGS="-fPIC -O3" ./configure && \ make && make install && \ - rm -rf /v1.2.11.tar.gz /zlib-1.2.11 + rm -rf /v1.2.12.tar.gz /zlib-1.2.12 # Zstandard RUN curl -O -L https://github.com/facebook/zstd/releases/download/v1.3.7/zstd-1.3.7.tar.gz && \ diff --git a/pulsar-client-cpp/pkg/licenses/LICENSE-zlib.txt b/pulsar-client-cpp/pkg/licenses/LICENSE-zlib.txt index cf25d04b7e1d1..f1f93cd6a6af5 100644 --- a/pulsar-client-cpp/pkg/licenses/LICENSE-zlib.txt +++ b/pulsar-client-cpp/pkg/licenses/LICENSE-zlib.txt @@ -1,7 +1,7 @@ zlib.h -- interface of 
the 'zlib' general purpose compression library - version 1.2.11, January 15th, 2017 + version 1.2.12, March 27th, 2022 - Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages diff --git a/pulsar-client-cpp/pkg/rpm/Dockerfile b/pulsar-client-cpp/pkg/rpm/Dockerfile index c2406c5c7d32a..e83290dccca74 100644 --- a/pulsar-client-cpp/pkg/rpm/Dockerfile +++ b/pulsar-client-cpp/pkg/rpm/Dockerfile @@ -49,12 +49,12 @@ RUN curl -O -L https://github.com/google/protobuf/releases/download/v3.3.0/prot rm -rf /protobuf-cpp-3.3.0.tar.gz /protobuf-3.3.0 # ZLib -RUN curl -O -L https://github.com/madler/zlib/archive/v1.2.11.tar.gz && \ - tar xvfz v1.2.11.tar.gz && \ - cd zlib-1.2.11 && \ +RUN curl -O -L https://github.com/madler/zlib/archive/v1.2.12.tar.gz && \ + tar xvfz v1.2.12.tar.gz && \ + cd zlib-1.2.12 && \ CFLAGS="-fPIC -O3" ./configure && \ make && make install && \ - rm -rf /v1.2.11.tar.gz /zlib-1.2.11 + rm -rf /v1.2.12.tar.gz /zlib-1.2.12 # Zstandard RUN curl -O -L https://github.com/facebook/zstd/releases/download/v1.3.7/zstd-1.3.7.tar.gz && \ diff --git a/pulsar-client-cpp/pulsar-test-service-start.sh b/pulsar-client-cpp/pulsar-test-service-start.sh index 248d628b9c2a9..2bee18e64b9c5 100755 --- a/pulsar-client-cpp/pulsar-test-service-start.sh +++ b/pulsar-client-cpp/pulsar-test-service-start.sh @@ -106,6 +106,10 @@ $PULSAR_DIR/bin/pulsar-admin namespaces grant-permission public/default-4 \ --role "anonymous" $PULSAR_DIR/bin/pulsar-admin namespaces set-encryption-required public/default-4 -e +# Create "public/test-backlog-quotas" to test backlog quotas policy +$PULSAR_DIR/bin/pulsar-admin namespaces create public/test-backlog-quotas \ + --clusters standalone + # Create "private" tenant $PULSAR_DIR/bin/pulsar-admin tenants create private -r "" -c "standalone" diff --git 
a/pulsar-client-cpp/python/CMakeLists.txt b/pulsar-client-cpp/python/CMakeLists.txt index f7d40699a86e9..ee4a6b2b03284 100644 --- a/pulsar-client-cpp/python/CMakeLists.txt +++ b/pulsar-client-cpp/python/CMakeLists.txt @@ -60,6 +60,10 @@ if (NOT DEFINED ${Boost_PYTHON39-MT_LIBRARY}) set(Boost_PYTHON39-MT_LIBRARY ${Boost_PYTHON39_LIBRARY}) endif() +if (NOT DEFINED ${Boost_PYTHON310-MT_LIBRARY}) + set(Boost_PYTHON310-MT_LIBRARY ${Boost_PYTHON310_LIBRARY}) +endif() + # Try all possible boost-python variable namings set(PYTHON_WRAPPER_LIBS ${Boost_PYTHON_LIBRARY} ${Boost_PYTHON3_LIBRARY} @@ -69,14 +73,26 @@ set(PYTHON_WRAPPER_LIBS ${Boost_PYTHON_LIBRARY} ${Boost_PYTHON35_LIBRARY} ${Boost_PYTHON36_LIBRARY} ${Boost_PYTHON38_LIBRARY} - ${Boost_PYTHON39_LIBRARY}) + ${Boost_PYTHON39_LIBRARY} + ${Boost_PYTHON310_LIBRARY} + ) if (APPLE) - set(PYTHON_WRAPPER_LIBS ${PYTHON_WRAPPER_LIBS} - ${Boost_PYTHON27-MT_LIBRARY_RELEASE} - ${Boost_PYTHON37-MT_LIBRARY_RELEASE} - ${Boost_PYTHON38-MT_LIBRARY_RELEASE} - ${Boost_PYTHON39-MT_LIBRARY_RELEASE}) + if (Boost_PYTHON27-MT_LIBRARY_RELEASE) + set(PYTHON_WRAPPER_LIBS ${PYTHON_WRAPPER_LIBS} ${Boost_PYTHON27-MT_LIBRARY_RELEASE}) + endif () + if (Boost_PYTHON37-MT_LIBRARY_RELEASE) + set(PYTHON_WRAPPER_LIBS ${PYTHON_WRAPPER_LIBS} ${Boost_PYTHON37-MT_LIBRARY_RELEASE}) + endif () + if (Boost_PYTHON38-MT_LIBRARY_RELEASE) + set(PYTHON_WRAPPER_LIBS ${PYTHON_WRAPPER_LIBS} ${Boost_PYTHON38-MT_LIBRARY_RELEASE}) + endif () + if (Boost_PYTHON39-MT_LIBRARY_RELEASE) + set(PYTHON_WRAPPER_LIBS ${PYTHON_WRAPPER_LIBS} ${Boost_PYTHON39-MT_LIBRARY_RELEASE}) + endif () + if (Boost_PYTHON310-MT_LIBRARY_RELEASE) + set(PYTHON_WRAPPER_LIBS ${PYTHON_WRAPPER_LIBS} ${Boost_PYTHON310-MT_LIBRARY_RELEASE}) + endif () endif() message(STATUS "Using Boost Python libs: ${PYTHON_WRAPPER_LIBS}") diff --git a/pulsar-client-cpp/python/build-mac-wheels.sh b/pulsar-client-cpp/python/build-mac-wheels.sh new file mode 100755 index 0000000000000..0a8823d1875ea --- /dev/null +++ 
b/pulsar-client-cpp/python/build-mac-wheels.sh @@ -0,0 +1,286 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +set -e + +ARCHS=( + 'x86_64' + 'arm64' +) + +PYTHON_VERSIONS=( + '3.8 3.8.13' + '3.9 3.9.10' + '3.10 3.10.2' +) + +export MACOSX_DEPLOYMENT_TARGET=11.0 +MACOSX_DEPLOYMENT_TARGET_MAJOR=${MACOSX_DEPLOYMENT_TARGET%%.*} + +ZLIB_VERSION=1.2.13 +OPENSSL_VERSION=1_1_1n +BOOST_VERSION=1.78.0 +PROTOBUF_VERSION=3.20.0 +ZSTD_VERSION=1.5.2 +SNAPPY_VERSION=1.1.3 +CURL_VERSION=7.61.0 + +ROOT_DIR=$(git rev-parse --show-toplevel) +cd "${ROOT_DIR}/pulsar-client-cpp" + + +# Compile and cache dependencies +CACHE_DIR=~/.pulsar-mac-wheels-cache +mkdir -p $CACHE_DIR + +cd $CACHE_DIR + +PREFIX=$CACHE_DIR/install + +############################################################################### +for line in "${PYTHON_VERSIONS[@]}"; do + read -r -a PY <<< "$line" + PYTHON_VERSION=${PY[0]} + PYTHON_VERSION_LONG=${PY[1]} + + if [ ! 
-f Python-${PYTHON_VERSION_LONG}/.done ]; then + echo "Building Python $PYTHON_VERSION_LONG" + curl -O -L https://www.python.org/ftp/python/${PYTHON_VERSION_LONG}/Python-${PYTHON_VERSION_LONG}.tgz + tar xfz Python-${PYTHON_VERSION_LONG}.tgz + + PY_PREFIX=$CACHE_DIR/py-$PYTHON_VERSION + pushd Python-${PYTHON_VERSION_LONG} + CFLAGS="-fPIC -O3 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ + ./configure --prefix=$PY_PREFIX --enable-shared --enable-universalsdk --with-universal-archs=universal2 + make -j16 + make install + + curl -O -L https://files.pythonhosted.org/packages/27/d6/003e593296a85fd6ed616ed962795b2f87709c3eee2bca4f6d0fe55c6d00/wheel-0.37.1-py2.py3-none-any.whl + $PY_PREFIX/bin/pip3 install wheel-*.whl + + touch .done + popd + else + echo "Using cached Python $PYTHON_VERSION_LONG" + fi +done + +############################################################################### +if [ ! -f zlib-${ZLIB_VERSION}/.done ]; then + echo "Building ZLib" + curl -O -L https://zlib.net/fossils/zlib-${ZLIB_VERSION}.tar.gz + tar xvfz zlib-$ZLIB_VERSION.tar.gz + pushd zlib-$ZLIB_VERSION + CFLAGS="-fPIC -O3 -arch arm64 -arch x86_64 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" ./configure --prefix=$PREFIX + make -j16 + make install + touch .done + popd +else + echo "Using cached ZLib" +fi + +############################################################################### +if [ ! 
-f openssl-OpenSSL_${OPENSSL_VERSION}.done ]; then + echo "Building OpenSSL" + curl -O -L https://github.com/openssl/openssl/archive/OpenSSL_${OPENSSL_VERSION}.tar.gz + # -arch arm64 -arch x86_64 + tar xvfz OpenSSL_${OPENSSL_VERSION}.tar.gz + mv openssl-OpenSSL_${OPENSSL_VERSION} openssl-OpenSSL_${OPENSSL_VERSION}-arm64 + pushd openssl-OpenSSL_${OPENSSL_VERSION}-arm64 + CFLAGS="-fPIC -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ + ./Configure --prefix=$PREFIX no-shared darwin64-arm64-cc + make -j8 + make install + popd + + tar xvfz OpenSSL_${OPENSSL_VERSION}.tar.gz + mv openssl-OpenSSL_${OPENSSL_VERSION} openssl-OpenSSL_${OPENSSL_VERSION}-x86_64 + pushd openssl-OpenSSL_${OPENSSL_VERSION}-x86_64 + CFLAGS="-fPIC -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ + ./Configure --prefix=$PREFIX no-shared darwin64-x86_64-cc + make -j8 + make install + popd + + # Create universal binaries + lipo -create openssl-OpenSSL_${OPENSSL_VERSION}-arm64/libssl.a openssl-OpenSSL_${OPENSSL_VERSION}-x86_64/libssl.a \ + -output $PREFIX/lib/libssl.a + lipo -create openssl-OpenSSL_${OPENSSL_VERSION}-arm64/libcrypto.a openssl-OpenSSL_${OPENSSL_VERSION}-x86_64/libcrypto.a \ + -output $PREFIX/lib/libcrypto.a + + touch openssl-OpenSSL_${OPENSSL_VERSION}.done +else + echo "Using cached OpenSSL" +fi + +############################################################################### +BOOST_VERSION_=${BOOST_VERSION//./_} +for line in "${PYTHON_VERSIONS[@]}"; do + read -r -a PY <<< "$line" + PYTHON_VERSION=${PY[0]} + PYTHON_VERSION_LONG=${PY[1]} + + DIR=boost-src-${BOOST_VERSION}-python-${PYTHON_VERSION} + if [ ! 
-f $DIR/.done ]; then + echo "Building Boost for Py $PYTHON_VERSION" + curl -O -L https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz + tar xfz boost_${BOOST_VERSION_}.tar.gz + mv boost_${BOOST_VERSION_} $DIR + + PY_PREFIX=$CACHE_DIR/py-$PYTHON_VERSION + + pushd $DIR + cat <<EOF > user-config.jam + using python : $PYTHON_VERSION + : python3 + : ${PY_PREFIX}/include/python${PYTHON_VERSION} + : ${PY_PREFIX}/lib + ; +EOF + ./bootstrap.sh --with-libraries=python --with-python=python3 --with-python-root=$PY_PREFIX \ + --prefix=$CACHE_DIR/boost-py-$PYTHON_VERSION + ./b2 address-model=64 cxxflags="-fPIC -arch arm64 -arch x86_64 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ + link=static threading=multi \ + --user-config=./user-config.jam \ + variant=release python=${PYTHON_VERSION} \ + -j16 \ + install + touch .done + popd + else + echo "Using cached Boost for Py $PYTHON_VERSION" + fi + +done + + + +############################################################################### +if [ ! -f protobuf-${PROTOBUF_VERSION}/.done ]; then + echo "Building Protobuf" + curl -O -L https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/protobuf-cpp-${PROTOBUF_VERSION}.tar.gz + tar xvfz protobuf-cpp-${PROTOBUF_VERSION}.tar.gz + pushd protobuf-${PROTOBUF_VERSION} + CXXFLAGS="-fPIC -arch arm64 -arch x86_64 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ + ./configure --prefix=$PREFIX + make -j16 + make install + touch .done + popd +else + echo "Using cached Protobuf" +fi + +############################################################################### +if [ !
-f zstd-${ZSTD_VERSION}/.done ]; then + echo "Building ZStd" + curl -O -L https://github.com/facebook/zstd/releases/download/v${ZSTD_VERSION}/zstd-${ZSTD_VERSION}.tar.gz + tar xvfz zstd-${ZSTD_VERSION}.tar.gz + pushd zstd-${ZSTD_VERSION} + CFLAGS="-fPIC -O3 -arch arm64 -arch x86_64 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" PREFIX=$PREFIX \ + make -j16 install + touch .done + popd +else + echo "Using cached ZStd" +fi + +############################################################################### +if [ ! -f snappy-${SNAPPY_VERSION}/.done ]; then + echo "Building Snappy" + curl -O -L https://github.com/google/snappy/releases/download/${SNAPPY_VERSION}/snappy-${SNAPPY_VERSION}.tar.gz + tar xvfz snappy-${SNAPPY_VERSION}.tar.gz + pushd snappy-${SNAPPY_VERSION} + CXXFLAGS="-fPIC -O3 -arch arm64 -arch x86_64 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ + ./configure --prefix=$PREFIX + make -j16 + make install + touch .done + popd +else + echo "Using cached Snappy" +fi + +############################################################################### +if [ ! 
-f curl-${CURL_VERSION}/.done ]; then + echo "Building LibCurl" + CURL_VERSION_=${CURL_VERSION//./_} + curl -O -L https://github.com/curl/curl/releases/download/curl-${CURL_VERSION_}/curl-${CURL_VERSION}.tar.gz + tar xfz curl-${CURL_VERSION}.tar.gz + pushd curl-${CURL_VERSION} + CFLAGS="-fPIC -arch arm64 -arch x86_64 -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET}" \ + ./configure --with-ssl=$PREFIX \ + --without-nghttp2 --without-libidn2 --disable-ldap \ + --prefix=$PREFIX + make -j16 install + touch .done + popd +else + echo "Using cached LibCurl" +fi + +############################################################################### +############################################################################### +############################################################################### +############################################################################### + +for line in "${PYTHON_VERSIONS[@]}"; do + read -r -a PY <<< "$line" + PYTHON_VERSION=${PY[0]} + PYTHON_VERSION_LONG=${PY[1]} + echo '----------------------------------------------------------------------------' + echo '----------------------------------------------------------------------------' + echo '----------------------------------------------------------------------------' + echo "Build wheel for Python $PYTHON_VERSION" + + cd "${ROOT_DIR}/pulsar-client-cpp" + + find . -name CMakeCache.txt | xargs -r rm + find . -name CMakeFiles | xargs -r rm -rf + + PY_PREFIX=$CACHE_DIR/py-$PYTHON_VERSION + PY_EXE=$PY_PREFIX/bin/python3 + + cmake . 
\ + -DCMAKE_OSX_ARCHITECTURES='arm64;x86_64' \ + -DCMAKE_OSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET} \ + -DCMAKE_OSX_SYSROOT=/Library/Developer/CommandLineTools/SDKs/MacOSX${MACOSX_DEPLOYMENT_TARGET_MAJOR}.sdk \ + -DCMAKE_INSTALL_PREFIX=$PREFIX \ + -DCMAKE_INSTALL_LIBDIR=$PREFIX/lib \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_PREFIX_PATH=$PREFIX \ + -DCMAKE_CXX_FLAGS=-I$PREFIX/include \ + -DCMAKE_FIND_FRAMEWORK=$PREFIX \ + -DBoost_INCLUDE_DIR=$CACHE_DIR/boost-py-$PYTHON_VERSION/include \ + -DBoost_LIBRARY_DIRS=$CACHE_DIR/boost-py-$PYTHON_VERSION/lib \ + -DPYTHON_INCLUDE_DIR=$PY_PREFIX/include/python$PYTHON_VERSION \ + -DPYTHON_LIBRARY=$PY_PREFIX/lib/libpython${PYTHON_VERSION}.dylib \ + -DLINK_STATIC=ON \ + -DBUILD_TESTS=OFF \ + -DBUILD_WIRESHARK=OFF \ + -DPROTOC_PATH=$PREFIX/bin/protoc + + make clean + make _pulsar -j16 + + cd python + $PY_EXE setup.py bdist_wheel +done diff --git a/pulsar-client-cpp/python/examples/company.avsc b/pulsar-client-cpp/python/examples/company.avsc new file mode 100644 index 0000000000000..5fb186092182b --- /dev/null +++ b/pulsar-client-cpp/python/examples/company.avsc @@ -0,0 +1,21 @@ +{ + "doc": "this is doc", + "namespace": "example.avro", + "type": "record", + "name": "Company", + "fields": [ + {"name": "name", "type": ["null", "string"]}, + {"name": "address", "type": ["null", "string"]}, + {"name": "employees", "type": ["null", {"type": "array", "items": { + "type": "record", + "name": "Employee", + "fields": [ + {"name": "name", "type": ["null", "string"]}, + {"name": "age", "type": ["null", "int"]} + ] + }}]}, + {"name": "labels", "type": ["null", {"type": "map", "values": "string"}]}, + {"name": "companyType", "type": ["null", {"type": "enum", "name": "CompanyType", "symbols": + ["companyType1", "companyType2", "companyType3"]}]} + ] +} \ No newline at end of file diff --git a/pulsar-client-cpp/python/pulsar/schema/__init__.py b/pulsar-client-cpp/python/pulsar/schema/__init__.py index 150629d0f2f25..efa680666a729 100644 
--- a/pulsar-client-cpp/python/pulsar/schema/__init__.py +++ b/pulsar-client-cpp/python/pulsar/schema/__init__.py @@ -18,7 +18,7 @@ # from .definition import Record, Field, Null, Boolean, Integer, Long, \ - Float, Double, Bytes, String, Array, Map + Float, Double, Bytes, String, Array, Map, CustomEnum from .schema import Schema, BytesSchema, StringSchema, JsonSchema from .schema_avro import AvroSchema diff --git a/pulsar-client-cpp/python/pulsar/schema/definition.py b/pulsar-client-cpp/python/pulsar/schema/definition.py index fd778f3293677..a7a235b25a6e3 100644 --- a/pulsar-client-cpp/python/pulsar/schema/definition.py +++ b/pulsar-client-cpp/python/pulsar/schema/definition.py @@ -44,8 +44,7 @@ def _get_fields(cls, dct): fields = OrderedDict() for name, value in dct.items(): if issubclass(type(value), EnumMeta): - # Wrap Python enums - value = _Enum(value) + value = CustomEnum(value) elif type(value) == RecordMeta: # We expect an instance of a record rather than the class itself value = value() @@ -125,6 +124,12 @@ def schema_info(cls, defined_names): schema['namespace'] = cls._avro_namespace schema['fields'] = [] + def get_filed_default_value(value): + if isinstance(value, Enum): + return value.name + else: + return value + if cls._sorted_fields: fields = sorted(cls._fields.keys()) else: @@ -135,7 +140,7 @@ def schema_info(cls, defined_names): if field._required else ['null', field.schema_info(defined_names)] schema['fields'].append({ 'name': name, - 'default': field.default(), + 'default': get_filed_default_value(field.default()), 'type': field_type }) if field.required_default() else schema['fields'].append({ 'name': name, @@ -179,7 +184,7 @@ def python_type(self): return self.__class__ def validate_type(self, name, val): - if not val and not self._required: + if val is None and not self._required: return self.default() if not isinstance(val, self.__class__): @@ -214,7 +219,7 @@ def python_type(self): pass def validate_type(self, name, val): - if not val and not 
self._required: + if val is None and not self._required: return self.default() if type(val) != self.python_type(): @@ -345,7 +350,7 @@ def python_type(self): def validate_type(self, name, val): t = type(val) - if not val and not self._required: + if val is None and not self._required: return self.default() if not (t is str or t.__name__ == 'unicode'): @@ -360,15 +365,16 @@ def default(self): # Complex types -class _Enum(Field): - def __init__(self, enum_type): + +class CustomEnum(Field): + def __init__(self, enum_type, default=None, required=False, required_default=False): if not issubclass(enum_type, Enum): raise Exception(enum_type + " is not a valid Enum type") self.enum_type = enum_type self.values = {} for x in enum_type.__members__.values(): self.values[x.value] = x - super(_Enum, self).__init__() + super(CustomEnum, self).__init__(default, required, required_default) def type(self): return 'enum' diff --git a/pulsar-client-cpp/python/pulsar/schema/schema.py b/pulsar-client-cpp/python/pulsar/schema/schema.py index 083efc353596b..349087ed75e03 100644 --- a/pulsar-client-cpp/python/pulsar/schema/schema.py +++ b/pulsar-client-cpp/python/pulsar/schema/schema.py @@ -85,11 +85,16 @@ def _get_serialized_value(self, o): def encode(self, obj): self._validate_object_type(obj) - del obj.__dict__['_default'] - del obj.__dict__['_required'] - del obj.__dict__['_required_default'] - - return json.dumps(obj.__dict__, default=self._get_serialized_value, indent=True).encode('utf-8') + # Copy the dict of the object as to not modify the provided object via the reference provided + data = obj.__dict__.copy() + if '_default' in data: + del data['_default'] + if '_required' in data: + del data['_required'] + if '_required_default' in data: + del data['_required_default'] + + return json.dumps(data, default=self._get_serialized_value, indent=True).encode('utf-8') def decode(self, data): return self._record_cls(**json.loads(data)) diff --git 
a/pulsar-client-cpp/python/pulsar/schema/schema_avro.py b/pulsar-client-cpp/python/pulsar/schema/schema_avro.py index e76fc51affbe8..58615053ca16f 100644 --- a/pulsar-client-cpp/python/pulsar/schema/schema_avro.py +++ b/pulsar-client-cpp/python/pulsar/schema/schema_avro.py @@ -32,10 +32,15 @@ if HAS_AVRO: class AvroSchema(Schema): - def __init__(self, record_cls): - super(AvroSchema, self).__init__(record_cls, _pulsar.SchemaType.AVRO, - record_cls.schema(), 'AVRO') - self._schema = record_cls.schema() + def __init__(self, record_cls, schema_definition=None): + if record_cls is None and schema_definition is None: + raise AssertionError("The param record_cls and schema_definition shouldn't be both None.") + + if record_cls is not None: + self._schema = record_cls.schema() + else: + self._schema = schema_definition + super(AvroSchema, self).__init__(record_cls, _pulsar.SchemaType.AVRO, self._schema, 'AVRO') def _get_serialized_value(self, x): if isinstance(x, enum.Enum): @@ -53,9 +58,14 @@ def _get_serialized_value(self, x): return x def encode(self, obj): - self._validate_object_type(obj) buffer = io.BytesIO() - m = self.encode_dict(obj.__dict__) + m = obj + if self._record_cls is not None: + self._validate_object_type(obj) + m = self.encode_dict(obj.__dict__) + elif not isinstance(obj, dict): + raise ValueError('If using the custom schema, the record data should be dict type.') + fastavro.schemaless_writer(buffer, self._schema, m) return buffer.getvalue() @@ -68,11 +78,14 @@ def encode_dict(self, d): def decode(self, data): buffer = io.BytesIO(data) d = fastavro.schemaless_reader(buffer, self._schema) - return self._record_cls(**d) + if self._record_cls is not None: + return self._record_cls(**d) + else: + return d else: class AvroSchema(Schema): - def __init__(self, _record_cls): + def __init__(self, _record_cls, _schema_definition): raise Exception("Avro library support was not found. 
Make sure to install Pulsar client " + "with Avro support: pip3 install 'pulsar-client[avro]'") diff --git a/pulsar-client-cpp/python/pulsar_test.py b/pulsar-client-cpp/python/pulsar_test.py index 8db53bdaf6e59..fd3656b1eedb2 100755 --- a/pulsar-client-cpp/python/pulsar_test.py +++ b/pulsar-client-cpp/python/pulsar_test.py @@ -25,10 +25,19 @@ import pulsar import uuid from datetime import timedelta -from pulsar import Client, MessageId, \ - CompressionType, ConsumerType, PartitionsRoutingMode, \ - AuthenticationTLS, Authentication, AuthenticationToken, InitialPosition, \ - CryptoKeyReader +from pulsar import ( + Client, + MessageId, + CompressionType, + ConsumerType, + PartitionsRoutingMode, + AuthenticationTLS, + Authentication, + AuthenticationToken, + InitialPosition, + CryptoKeyReader, +) +from pulsar.schema import JsonSchema, Record, Integer from _pulsar import ProducerConfiguration, ConsumerConfiguration @@ -46,19 +55,19 @@ def doHttpPost(url, data): req = Request(url, data.encode()) - req.add_header('Content-Type', 'application/json') + req.add_header("Content-Type", "application/json") urlopen(req) def doHttpPut(url, data): try: req = Request(url, data.encode()) - req.add_header('Content-Type', 'application/json') - req.get_method = lambda: 'PUT' + req.add_header("Content-Type", "application/json") + req.get_method = lambda: "PUT" urlopen(req) except Exception as ex: # ignore conflicts exception to have test idempotency - if '409' in str(ex): + if "409" in str(ex): pass else: raise ex @@ -66,16 +75,21 @@ def doHttpPut(url, data): def doHttpGet(url): req = Request(url) - req.add_header('Accept', 'application/json') + req.add_header("Accept", "application/json") return urlopen(req).read() +class TestRecord(Record): + a = Integer() + b = Integer() + + class PulsarTest(TestCase): - serviceUrl = 'pulsar://localhost:6650' - adminUrl = 'http://localhost:8080' + serviceUrl = "pulsar://localhost:6650" + adminUrl = "http://localhost:8080" - serviceUrlTls = 
'pulsar+ssl://localhost:6651' + serviceUrlTls = "pulsar+ssl://localhost:6651" def test_producer_config(self): conf = ProducerConfiguration() @@ -95,7 +109,7 @@ def test_consumer_config(self): conf.consumer_type(ConsumerType.Shared) self.assertEqual(conf.consumer_type(), ConsumerType.Shared) - self.assertEqual(conf.consumer_name(), '') + self.assertEqual(conf.consumer_name(), "") conf.consumer_name("my-name") self.assertEqual(conf.consumer_name(), "my-name") @@ -105,8 +119,8 @@ def test_consumer_config(self): def test_connect_error(self): with self.assertRaises(pulsar.ConnectError): - client = Client('fakeServiceUrl') - client.create_producer('connect-error-topic') + client = Client("fakeServiceUrl") + client.create_producer("connect-error-topic") client.close() def test_exception_inheritance(self): @@ -115,23 +129,23 @@ def test_exception_inheritance(self): def test_simple_producer(self): client = Client(self.serviceUrl) - producer = client.create_producer('my-python-topic') - producer.send(b'hello') + producer = client.create_producer("my-python-topic") + producer.send(b"hello") producer.close() client.close() def test_producer_send_async(self): client = Client(self.serviceUrl) - producer = client.create_producer('my-python-topic') + producer = client.create_producer("my-python-topic") sent_messages = [] def send_callback(producer, msg): sent_messages.append(msg) - producer.send_async(b'hello', send_callback) - producer.send_async(b'hello', send_callback) - producer.send_async(b'hello', send_callback) + producer.send_async(b"hello", send_callback) + producer.send_async(b"hello", send_callback) + producer.send_async(b"hello", send_callback) i = 0 while len(sent_messages) < 3 and i < 100: @@ -142,28 +156,26 @@ def send_callback(producer, msg): def test_producer_send(self): client = Client(self.serviceUrl) - topic = 'test_producer_send' + topic = "test_producer_send" producer = client.create_producer(topic) - consumer = client.subscribe(topic, 'sub-name') - msg_id = 
producer.send(b'hello') - print('send to {}'.format(msg_id)) + consumer = client.subscribe(topic, "sub-name") + msg_id = producer.send(b"hello") + print("send to {}".format(msg_id)) msg = consumer.receive(TM) consumer.acknowledge(msg) - print('receive from {}'.format(msg.message_id())) + print("receive from {}".format(msg.message_id())) self.assertEqual(msg_id, msg.message_id()) client.close() def test_producer_consumer(self): client = Client(self.serviceUrl) - consumer = client.subscribe('my-python-topic-producer-consumer', - 'my-sub', - consumer_type=ConsumerType.Shared) - producer = client.create_producer('my-python-topic-producer-consumer') - producer.send(b'hello') + consumer = client.subscribe("my-python-topic-producer-consumer", "my-sub", consumer_type=ConsumerType.Shared) + producer = client.create_producer("my-python-topic-producer-consumer") + producer.send(b"hello") msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") with self.assertRaises(pulsar.Timeout): consumer.receive(100) @@ -173,12 +185,14 @@ def test_producer_consumer(self): def test_redelivery_count(self): client = Client(self.serviceUrl) - consumer = client.subscribe('my-python-topic-redelivery-count', - 'my-sub', - consumer_type=ConsumerType.Shared, - negative_ack_redelivery_delay_ms=500) - producer = client.create_producer('my-python-topic-redelivery-count') - producer.send(b'hello') + consumer = client.subscribe( + "my-python-topic-redelivery-count", + "my-sub", + consumer_type=ConsumerType.Shared, + negative_ack_redelivery_delay_ms=500, + ) + producer = client.create_producer("my-python-topic-redelivery-count") + producer.send(b"hello") redelivery_count = 0 for i in range(4): @@ -188,7 +202,7 @@ def test_redelivery_count(self): redelivery_count = msg.redelivery_count() self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") self.assertEqual(3, redelivery_count) 
consumer.unsubscribe() producer.close() @@ -196,12 +210,10 @@ def test_redelivery_count(self): def test_deliver_at(self): client = Client(self.serviceUrl) - consumer = client.subscribe('my-python-topic-deliver-at', - 'my-sub', - consumer_type=ConsumerType.Shared) - producer = client.create_producer('my-python-topic-deliver-at') + consumer = client.subscribe("my-python-topic-deliver-at", "my-sub", consumer_type=ConsumerType.Shared) + producer = client.create_producer("my-python-topic-deliver-at") # Delay message in 1.1s - producer.send(b'hello', deliver_at=int(round(time.time() * 1000)) + 1100) + producer.send(b"hello", deliver_at=int(round(time.time() * 1000)) + 1100) # Message should not be available in the next second with self.assertRaises(pulsar.Timeout): @@ -210,19 +222,17 @@ def test_deliver_at(self): # Message should be published now msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") consumer.unsubscribe() producer.close() client.close() def test_deliver_after(self): client = Client(self.serviceUrl) - consumer = client.subscribe('my-python-topic-deliver-after', - 'my-sub', - consumer_type=ConsumerType.Shared) - producer = client.create_producer('my-python-topic-deliver-after') + consumer = client.subscribe("my-python-topic-deliver-after", "my-sub", consumer_type=ConsumerType.Shared) + producer = client.create_producer("my-python-topic-deliver-after") # Delay message in 1.1s - producer.send(b'hello', deliver_after=timedelta(milliseconds=1100)) + producer.send(b"hello", deliver_after=timedelta(milliseconds=1100)) # Message should not be available in the next second with self.assertRaises(pulsar.Timeout): @@ -231,33 +241,35 @@ def test_deliver_after(self): # Message should be published in the next 500ms msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") consumer.unsubscribe() producer.close() 
client.close() def test_consumer_initial_position(self): client = Client(self.serviceUrl) - producer = client.create_producer('consumer-initial-position') + producer = client.create_producer("consumer-initial-position") # Sending 5 messages before consumer creation. # These should be received with initial_position set to Earliest but not with Latest. for i in range(5): - producer.send(b'hello-%d' % i) + producer.send(b"hello-%d" % i) - consumer = client.subscribe('consumer-initial-position', - 'my-sub', - consumer_type=ConsumerType.Shared, - initial_position=InitialPosition.Earliest) + consumer = client.subscribe( + "consumer-initial-position", + "my-sub", + consumer_type=ConsumerType.Shared, + initial_position=InitialPosition.Earliest, + ) # Sending 5 other messages that should be received regardless of the initial_position. for i in range(5, 10): - producer.send(b'hello-%d' % i) + producer.send(b"hello-%d" % i) for i in range(10): msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello-%d' % i) + self.assertEqual(msg.data(), b"hello-%d" % i) with self.assertRaises(pulsar.Timeout): consumer.receive(100) @@ -267,65 +279,59 @@ def test_consumer_initial_position(self): def test_consumer_queue_size_is_zero(self): client = Client(self.serviceUrl) - consumer = client.subscribe('my-python-topic-consumer-init-queue-size-is-zero', - 'my-sub', - consumer_type=ConsumerType.Shared, - receiver_queue_size=0, - initial_position=InitialPosition.Earliest) - producer = client.create_producer('my-python-topic-consumer-init-queue-size-is-zero') - producer.send(b'hello') + consumer = client.subscribe( + "my-python-topic-consumer-init-queue-size-is-zero", + "my-sub", + consumer_type=ConsumerType.Shared, + receiver_queue_size=0, + initial_position=InitialPosition.Earliest, + ) + producer = client.create_producer("my-python-topic-consumer-init-queue-size-is-zero") + producer.send(b"hello") time.sleep(0.1) msg = consumer.receive() self.assertTrue(msg) - 
self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") consumer.unsubscribe() client.close() def test_message_properties(self): client = Client(self.serviceUrl) - topic = 'my-python-test-message-properties' - consumer = client.subscribe(topic=topic, - subscription_name='my-subscription', - schema=pulsar.schema.StringSchema()) - producer = client.create_producer(topic=topic, - schema=pulsar.schema.StringSchema()) - producer.send('hello', - properties={ - 'a': '1', - 'b': '2' - }) + topic = "my-python-test-message-properties" + consumer = client.subscribe( + topic=topic, subscription_name="my-subscription", schema=pulsar.schema.StringSchema() + ) + producer = client.create_producer(topic=topic, schema=pulsar.schema.StringSchema()) + producer.send("hello", properties={"a": "1", "b": "2"}) msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.value(), 'hello') - self.assertEqual(msg.properties(), { - 'a': '1', - 'b': '2' - }) + self.assertEqual(msg.value(), "hello") + self.assertEqual(msg.properties(), {"a": "1", "b": "2"}) consumer.unsubscribe() client.close() def test_tls_auth(self): - certs_dir = '/pulsar/pulsar-broker/src/test/resources/authentication/tls/' + certs_dir = "/pulsar/pulsar-broker/src/test/resources/authentication/tls/" if not os.path.exists(certs_dir): certs_dir = "../../pulsar-broker/src/test/resources/authentication/tls/" - client = Client(self.serviceUrlTls, - tls_trust_certs_file_path=certs_dir + 'cacert.pem', - tls_allow_insecure_connection=False, - authentication=AuthenticationTLS(certs_dir + 'client-cert.pem', certs_dir + 'client-key.pem')) - - topic = 'my-python-topic-tls-auth-' + str(time.time()) - consumer = client.subscribe(topic, - 'my-sub', - consumer_type=ConsumerType.Shared) + client = Client( + self.serviceUrlTls, + tls_trust_certs_file_path=certs_dir + "cacert.pem", + tls_allow_insecure_connection=False, + authentication=AuthenticationTLS(certs_dir + "client-cert.pem", certs_dir + 
"client-key.pem"), + ) + + topic = "my-python-topic-tls-auth-" + str(time.time()) + consumer = client.subscribe(topic, "my-sub", consumer_type=ConsumerType.Shared) producer = client.create_producer(topic) - producer.send(b'hello') + producer.send(b"hello") msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") with self.assertRaises(pulsar.Timeout): consumer.receive(100) @@ -333,27 +339,27 @@ def test_tls_auth(self): client.close() def test_tls_auth2(self): - certs_dir = '/pulsar/pulsar-broker/src/test/resources/authentication/tls/' + certs_dir = "/pulsar/pulsar-broker/src/test/resources/authentication/tls/" if not os.path.exists(certs_dir): certs_dir = "../../pulsar-broker/src/test/resources/authentication/tls/" authPlugin = "org.apache.pulsar.client.impl.auth.AuthenticationTls" authParams = "tlsCertFile:%s/client-cert.pem,tlsKeyFile:%s/client-key.pem" % (certs_dir, certs_dir) - client = Client(self.serviceUrlTls, - tls_trust_certs_file_path=certs_dir + 'cacert.pem', - tls_allow_insecure_connection=False, - authentication=Authentication(authPlugin, authParams)) + client = Client( + self.serviceUrlTls, + tls_trust_certs_file_path=certs_dir + "cacert.pem", + tls_allow_insecure_connection=False, + authentication=Authentication(authPlugin, authParams), + ) - topic = 'my-python-topic-tls-auth-2-' + str(time.time()) - consumer = client.subscribe(topic, - 'my-sub', - consumer_type=ConsumerType.Shared) + topic = "my-python-topic-tls-auth-2-" + str(time.time()) + consumer = client.subscribe(topic, "my-sub", consumer_type=ConsumerType.Shared) producer = client.create_producer(topic) - producer.send(b'hello') + producer.send(b"hello") msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") with self.assertRaises(pulsar.Timeout): consumer.receive(100) @@ -365,25 +371,25 @@ def test_encryption(self): privateKeyPath = 
"/pulsar/pulsar-broker/src/test/resources/certificate/private-key.client-rsa.pem" crypto_key_reader = CryptoKeyReader(publicKeyPath, privateKeyPath) client = Client(self.serviceUrl) - topic = 'my-python-test-end-to-end-encryption' - consumer = client.subscribe(topic=topic, - subscription_name='my-subscription', - crypto_key_reader=crypto_key_reader) - producer = client.create_producer(topic=topic, - encryption_key="client-rsa.pem", - crypto_key_reader=crypto_key_reader) - reader = client.create_reader(topic=topic, - start_message_id=MessageId.earliest, - crypto_key_reader=crypto_key_reader) - producer.send(b'hello') + topic = "my-python-test-end-to-end-encryption" + consumer = client.subscribe( + topic=topic, subscription_name="my-subscription", crypto_key_reader=crypto_key_reader + ) + producer = client.create_producer( + topic=topic, encryption_key="client-rsa.pem", crypto_key_reader=crypto_key_reader + ) + reader = client.create_reader( + topic=topic, start_message_id=MessageId.earliest, crypto_key_reader=crypto_key_reader + ) + producer.send(b"hello") msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.value(), b'hello') + self.assertEqual(msg.value(), b"hello") consumer.unsubscribe() msg = reader.read_next(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") with self.assertRaises(pulsar.Timeout): reader.read_next(100) @@ -393,27 +399,27 @@ def test_encryption(self): client.close() def test_tls_auth3(self): - certs_dir = '/pulsar/pulsar-broker/src/test/resources/authentication/tls/' + certs_dir = "/pulsar/pulsar-broker/src/test/resources/authentication/tls/" if not os.path.exists(certs_dir): certs_dir = "../../pulsar-broker/src/test/resources/authentication/tls/" authPlugin = "tls" authParams = "tlsCertFile:%s/client-cert.pem,tlsKeyFile:%s/client-key.pem" % (certs_dir, certs_dir) - client = Client(self.serviceUrlTls, - tls_trust_certs_file_path=certs_dir + 'cacert.pem', - 
tls_allow_insecure_connection=False, - authentication=Authentication(authPlugin, authParams)) + client = Client( + self.serviceUrlTls, + tls_trust_certs_file_path=certs_dir + "cacert.pem", + tls_allow_insecure_connection=False, + authentication=Authentication(authPlugin, authParams), + ) - topic = 'my-python-topic-tls-auth-3-' + str(time.time()) - consumer = client.subscribe(topic, - 'my-sub', - consumer_type=ConsumerType.Shared) + topic = "my-python-topic-tls-auth-3-" + str(time.time()) + consumer = client.subscribe(topic, "my-sub", consumer_type=ConsumerType.Shared) producer = client.create_producer(topic) - producer.send(b'hello') + producer.send(b"hello") msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") with self.assertRaises(pulsar.Timeout): consumer.receive(100) @@ -421,20 +427,20 @@ def test_tls_auth3(self): client.close() def test_auth_junk_params(self): - certs_dir = '/pulsar/pulsar-broker/src/test/resources/authentication/tls/' + certs_dir = "/pulsar/pulsar-broker/src/test/resources/authentication/tls/" if not os.path.exists(certs_dir): certs_dir = "../../pulsar-broker/src/test/resources/authentication/tls/" authPlugin = "someoldjunk.so" authParams = "blah" - client = Client(self.serviceUrlTls, - tls_trust_certs_file_path=certs_dir + 'cacert.pem', - tls_allow_insecure_connection=False, - authentication=Authentication(authPlugin, authParams)) + client = Client( + self.serviceUrlTls, + tls_trust_certs_file_path=certs_dir + "cacert.pem", + tls_allow_insecure_connection=False, + authentication=Authentication(authPlugin, authParams), + ) with self.assertRaises(pulsar.ConnectError): - client.subscribe('my-python-topic-auth-junk-params', - 'my-sub', - consumer_type=ConsumerType.Shared) + client.subscribe("my-python-topic-auth-junk-params", "my-sub", consumer_type=ConsumerType.Shared) def test_message_listener(self): client = Client(self.serviceUrl) @@ -446,14 +452,13 @@ def 
listener(consumer, msg): received_messages.append(msg) consumer.acknowledge(msg) - client.subscribe('my-python-topic-listener', - 'my-sub', - consumer_type=ConsumerType.Exclusive, - message_listener=listener) - producer = client.create_producer('my-python-topic-listener') - producer.send(b'hello-1') - producer.send(b'hello-2') - producer.send(b'hello-3') + client.subscribe( + "my-python-topic-listener", "my-sub", consumer_type=ConsumerType.Exclusive, message_listener=listener + ) + producer = client.create_producer("my-python-topic-listener") + producer.send(b"hello-1") + producer.send(b"hello-2") + producer.send(b"hello-3") time.sleep(0.1) self.assertEqual(len(received_messages), 3) @@ -464,15 +469,14 @@ def listener(consumer, msg): def test_reader_simple(self): client = Client(self.serviceUrl) - reader = client.create_reader('my-python-topic-reader-simple', - MessageId.earliest) + reader = client.create_reader("my-python-topic-reader-simple", MessageId.earliest) - producer = client.create_producer('my-python-topic-reader-simple') - producer.send(b'hello') + producer = client.create_producer("my-python-topic-reader-simple") + producer.send(b"hello") msg = reader.read_next(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") with self.assertRaises(pulsar.Timeout): reader.read_next(100) @@ -482,21 +486,20 @@ def test_reader_simple(self): def test_reader_on_last_message(self): client = Client(self.serviceUrl) - producer = client.create_producer('my-python-topic-reader-on-last-message') + producer = client.create_producer("my-python-topic-reader-on-last-message") for i in range(10): - producer.send(b'hello-%d' % i) + producer.send(b"hello-%d" % i) - reader = client.create_reader('my-python-topic-reader-on-last-message', - MessageId.latest) + reader = client.create_reader("my-python-topic-reader-on-last-message", MessageId.latest) for i in range(10, 20): - producer.send(b'hello-%d' % i) + producer.send(b"hello-%d" % 
i) for i in range(10, 20): msg = reader.read_next(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello-%d' % i) + self.assertEqual(msg.data(), b"hello-%d" % i) reader.close() client.close() @@ -504,26 +507,21 @@ def test_reader_on_last_message(self): def test_reader_on_specific_message(self): num_of_msgs = 10 client = Client(self.serviceUrl) - producer = client.create_producer( - 'my-python-topic-reader-on-specific-message') + producer = client.create_producer("my-python-topic-reader-on-specific-message") for i in range(num_of_msgs): - producer.send(b'hello-%d' % i) + producer.send(b"hello-%d" % i) - reader1 = client.create_reader( - 'my-python-topic-reader-on-specific-message', - MessageId.earliest) + reader1 = client.create_reader("my-python-topic-reader-on-specific-message", MessageId.earliest) - for i in range(num_of_msgs//2): + for i in range(num_of_msgs // 2): msg = reader1.read_next(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello-%d' % i) + self.assertEqual(msg.data(), b"hello-%d" % i) last_msg_id = msg.message_id() last_msg_idx = i - reader2 = client.create_reader( - 'my-python-topic-reader-on-specific-message', - last_msg_id) + reader2 = client.create_reader("my-python-topic-reader-on-specific-message", last_msg_id) # The reset would be effectively done on the next position relative to reset. # When available, we should test this behaviour with `startMessageIdInclusive` opt. 
@@ -531,7 +529,7 @@ def test_reader_on_specific_message(self): for i in range(from_msg_idx, num_of_msgs): msg = reader2.read_next(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello-%d' % i) + self.assertEqual(msg.data(), b"hello-%d" % i) reader1.close() reader2.close() @@ -540,32 +538,29 @@ def test_reader_on_specific_message(self): def test_reader_on_specific_message_with_batches(self): client = Client(self.serviceUrl) producer = client.create_producer( - 'my-python-topic-reader-on-specific-message-with-batches', + "my-python-topic-reader-on-specific-message-with-batches", batching_enabled=True, - batching_max_publish_delay_ms=1000) + batching_max_publish_delay_ms=1000, + ) for i in range(10): - producer.send_async(b'hello-%d' % i, None) + producer.send_async(b"hello-%d" % i, None) # Send one sync message to make sure everything was published - producer.send(b'hello-10') + producer.send(b"hello-10") - reader1 = client.create_reader( - 'my-python-topic-reader-on-specific-message-with-batches', - MessageId.earliest) + reader1 = client.create_reader("my-python-topic-reader-on-specific-message-with-batches", MessageId.earliest) for i in range(5): msg = reader1.read_next(TM) last_msg_id = msg.message_id() - reader2 = client.create_reader( - 'my-python-topic-reader-on-specific-message-with-batches', - last_msg_id) + reader2 = client.create_reader("my-python-topic-reader-on-specific-message-with-batches", last_msg_id) for i in range(5, 11): msg = reader2.read_next(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello-%d' % i) + self.assertEqual(msg.data(), b"hello-%d" % i) reader1.close() reader2.close() @@ -573,60 +568,56 @@ def test_reader_on_specific_message_with_batches(self): def test_producer_sequence_after_reconnection(self): # Enable deduplication on namespace - doHttpPost(self.adminUrl + '/admin/v2/namespaces/public/default/deduplication', - 'true') + doHttpPost(self.adminUrl + "/admin/v2/namespaces/public/default/deduplication", "true") 
client = Client(self.serviceUrl) - topic = 'my-python-test-producer-sequence-after-reconnection-' \ - + str(time.time()) + topic = "my-python-test-producer-sequence-after-reconnection-" + str(time.time()) - producer = client.create_producer(topic, producer_name='my-producer-name') + producer = client.create_producer(topic, producer_name="my-producer-name") self.assertEqual(producer.last_sequence_id(), -1) for i in range(10): - producer.send(b'hello-%d' % i) + producer.send(b"hello-%d" % i) self.assertEqual(producer.last_sequence_id(), i) producer.close() - producer = client.create_producer(topic, producer_name='my-producer-name') + producer = client.create_producer(topic, producer_name="my-producer-name") self.assertEqual(producer.last_sequence_id(), 9) for i in range(10, 20): - producer.send(b'hello-%d' % i) + producer.send(b"hello-%d" % i) self.assertEqual(producer.last_sequence_id(), i) client.close() - doHttpPost(self.adminUrl + '/admin/v2/namespaces/public/default/deduplication', - 'false') + doHttpPost(self.adminUrl + "/admin/v2/namespaces/public/default/deduplication", "false") def test_producer_deduplication(self): # Enable deduplication on namespace - doHttpPost(self.adminUrl + '/admin/v2/namespaces/public/default/deduplication', - 'true') + doHttpPost(self.adminUrl + "/admin/v2/namespaces/public/default/deduplication", "true") client = Client(self.serviceUrl) - topic = 'my-python-test-producer-deduplication-' + str(time.time()) + topic = "my-python-test-producer-deduplication-" + str(time.time()) - producer = client.create_producer(topic, producer_name='my-producer-name') + producer = client.create_producer(topic, producer_name="my-producer-name") self.assertEqual(producer.last_sequence_id(), -1) - consumer = client.subscribe(topic, 'my-sub') + consumer = client.subscribe(topic, "my-sub") - producer.send(b'hello-0', sequence_id=0) - producer.send(b'hello-1', sequence_id=1) - producer.send(b'hello-2', sequence_id=2) + producer.send(b"hello-0", 
sequence_id=0) + producer.send(b"hello-1", sequence_id=1) + producer.send(b"hello-2", sequence_id=2) self.assertEqual(producer.last_sequence_id(), 2) # Repeat the messages and verify they're not received by consumer - producer.send(b'hello-1', sequence_id=1) - producer.send(b'hello-2', sequence_id=2) + producer.send(b"hello-1", sequence_id=1) + producer.send(b"hello-2", sequence_id=2) self.assertEqual(producer.last_sequence_id(), 2) for i in range(3): msg = consumer.receive(TM) - self.assertEqual(msg.data(), b'hello-%d' % i) + self.assertEqual(msg.data(), b"hello-%d" % i) consumer.acknowledge(msg) with self.assertRaises(pulsar.Timeout): @@ -634,12 +625,12 @@ def test_producer_deduplication(self): producer.close() - producer = client.create_producer(topic, producer_name='my-producer-name') + producer = client.create_producer(topic, producer_name="my-producer-name") self.assertEqual(producer.last_sequence_id(), 2) # Repeat the messages and verify they're not received by consumer - producer.send(b'hello-1', sequence_id=1) - producer.send(b'hello-2', sequence_id=2) + producer.send(b"hello-1", sequence_id=1) + producer.send(b"hello-2", sequence_id=2) self.assertEqual(producer.last_sequence_id(), 2) with self.assertRaises(pulsar.Timeout): @@ -647,32 +638,32 @@ def test_producer_deduplication(self): client.close() - doHttpPost(self.adminUrl + '/admin/v2/namespaces/public/default/deduplication', - 'false') + doHttpPost(self.adminUrl + "/admin/v2/namespaces/public/default/deduplication", "false") def test_producer_routing_mode(self): client = Client(self.serviceUrl) - producer = client.create_producer('my-python-test-producer', - message_routing_mode=PartitionsRoutingMode.UseSinglePartition) - producer.send(b'test') + producer = client.create_producer( + "my-python-test-producer", message_routing_mode=PartitionsRoutingMode.UseSinglePartition + ) + producer.send(b"test") client.close() def test_message_argument_errors(self): client = Client(self.serviceUrl) - topic = 
'my-python-test-producer' + topic = "my-python-test-producer" producer = client.create_producer(topic) - content = 'test'.encode('utf-8') + content = "test".encode("utf-8") self._check_type_error(lambda: producer.send(5)) - self._check_value_error(lambda: producer.send(content, properties='test')) + self._check_value_error(lambda: producer.send(content, properties="test")) self._check_value_error(lambda: producer.send(content, partition_key=5)) - self._check_value_error(lambda: producer.send(content, sequence_id='test')) + self._check_value_error(lambda: producer.send(content, sequence_id="test")) self._check_value_error(lambda: producer.send(content, replication_clusters=5)) - self._check_value_error(lambda: producer.send(content, disable_replication='test')) - self._check_value_error(lambda: producer.send(content, event_timestamp='test')) - self._check_value_error(lambda: producer.send(content, deliver_at='test')) - self._check_value_error(lambda: producer.send(content, deliver_after='test')) + self._check_value_error(lambda: producer.send(content, disable_replication="test")) + self._check_value_error(lambda: producer.send(content, event_timestamp="test")) + self._check_value_error(lambda: producer.send(content, deliver_at="test")) + self._check_value_error(lambda: producer.send(content, deliver_after="test")) client.close() def test_client_argument_errors(self): @@ -692,75 +683,75 @@ def test_producer_argument_errors(self): self._check_value_error(lambda: client.create_producer(None)) - topic = 'my-python-test-producer' + topic = "my-python-test-producer" self._check_value_error(lambda: client.create_producer(topic, producer_name=5)) - self._check_value_error(lambda: client.create_producer(topic, initial_sequence_id='test')) - self._check_value_error(lambda: client.create_producer(topic, send_timeout_millis='test')) + self._check_value_error(lambda: client.create_producer(topic, initial_sequence_id="test")) + self._check_value_error(lambda: 
client.create_producer(topic, send_timeout_millis="test")) self._check_value_error(lambda: client.create_producer(topic, compression_type=None)) - self._check_value_error(lambda: client.create_producer(topic, max_pending_messages='test')) - self._check_value_error(lambda: client.create_producer(topic, block_if_queue_full='test')) - self._check_value_error(lambda: client.create_producer(topic, batching_enabled='test')) - self._check_value_error(lambda: client.create_producer(topic, batching_enabled='test')) - self._check_value_error(lambda: client.create_producer(topic, batching_max_allowed_size_in_bytes='test')) - self._check_value_error(lambda: client.create_producer(topic, batching_max_publish_delay_ms='test')) + self._check_value_error(lambda: client.create_producer(topic, max_pending_messages="test")) + self._check_value_error(lambda: client.create_producer(topic, block_if_queue_full="test")) + self._check_value_error(lambda: client.create_producer(topic, batching_enabled="test")) + self._check_value_error(lambda: client.create_producer(topic, batching_enabled="test")) + self._check_value_error(lambda: client.create_producer(topic, batching_max_allowed_size_in_bytes="test")) + self._check_value_error(lambda: client.create_producer(topic, batching_max_publish_delay_ms="test")) client.close() def test_consumer_argument_errors(self): client = Client(self.serviceUrl) - topic = 'my-python-test-producer' - sub_name = 'my-sub-name' + topic = "my-python-test-producer" + sub_name = "my-sub-name" self._check_value_error(lambda: client.subscribe(None, sub_name)) self._check_value_error(lambda: client.subscribe(topic, None)) self._check_value_error(lambda: client.subscribe(topic, sub_name, consumer_type=None)) - self._check_value_error(lambda: client.subscribe(topic, sub_name, receiver_queue_size='test')) + self._check_value_error(lambda: client.subscribe(topic, sub_name, receiver_queue_size="test")) self._check_value_error(lambda: client.subscribe(topic, sub_name, 
consumer_name=5)) - self._check_value_error(lambda: client.subscribe(topic, sub_name, unacked_messages_timeout_ms='test')) - self._check_value_error(lambda: client.subscribe(topic, sub_name, broker_consumer_stats_cache_time_ms='test')) + self._check_value_error(lambda: client.subscribe(topic, sub_name, unacked_messages_timeout_ms="test")) + self._check_value_error(lambda: client.subscribe(topic, sub_name, broker_consumer_stats_cache_time_ms="test")) client.close() def test_reader_argument_errors(self): client = Client(self.serviceUrl) - topic = 'my-python-test-producer' + topic = "my-python-test-producer" # This should not raise exception client.create_reader(topic, MessageId.earliest) self._check_value_error(lambda: client.create_reader(None, MessageId.earliest)) self._check_value_error(lambda: client.create_reader(topic, None)) - self._check_value_error(lambda: client.create_reader(topic, MessageId.earliest, receiver_queue_size='test')) + self._check_value_error(lambda: client.create_reader(topic, MessageId.earliest, receiver_queue_size="test")) self._check_value_error(lambda: client.create_reader(topic, MessageId.earliest, reader_name=5)) client.close() def test_publish_compact_and_consume(self): client = Client(self.serviceUrl) - topic = 'compaction_%s' % (uuid.uuid4()) - producer = client.create_producer(topic, producer_name='my-producer-name', batching_enabled=False) + topic = "compaction_%s" % (uuid.uuid4()) + producer = client.create_producer(topic, producer_name="my-producer-name", batching_enabled=False) self.assertEqual(producer.last_sequence_id(), -1) - consumer = client.subscribe(topic, 'my-sub1', is_read_compacted=True) + consumer = client.subscribe(topic, "my-sub1", is_read_compacted=True) consumer.close() - consumer2 = client.subscribe(topic, 'my-sub2', is_read_compacted=False) + consumer2 = client.subscribe(topic, "my-sub2", is_read_compacted=False) # producer create 2 messages with same key. 
- producer.send(b'hello-0', partition_key='key0') - producer.send(b'hello-1', partition_key='key0') + producer.send(b"hello-0", partition_key="key0") + producer.send(b"hello-1", partition_key="key0") producer.close() # issue compact command, and wait success - url='%s/admin/v2/persistent/public/default/%s/compaction' % (self.adminUrl, topic) - doHttpPut(url, '') + url = "%s/admin/v2/persistent/public/default/%s/compaction" % (self.adminUrl, topic) + doHttpPut(url, "") while True: - s=doHttpGet(url).decode('utf-8') - if 'RUNNING' in s: + s = doHttpGet(url).decode("utf-8") + if "RUNNING" in s: print(s) print("Compact still running") time.sleep(0.2) else: print(s) print("Compact Complete now") - self.assertTrue('SUCCESS' in s) + self.assertTrue("SUCCESS" in s) break # after compaction completes the compacted ledger is recorded @@ -772,87 +763,84 @@ def test_publish_compact_and_consume(self): time.sleep(1.0) # after compact, consumer with `is_read_compacted=True`, expected read only the second message for same key. - consumer1 = client.subscribe(topic, 'my-sub1', is_read_compacted=True) + consumer1 = client.subscribe(topic, "my-sub1", is_read_compacted=True) msg0 = consumer1.receive(TM) - self.assertEqual(msg0.data(), b'hello-1') + self.assertEqual(msg0.data(), b"hello-1") consumer1.acknowledge(msg0) consumer1.close() # ditto for reader reader1 = client.create_reader(topic, MessageId.earliest, is_read_compacted=True) msg0 = reader1.read_next(TM) - self.assertEqual(msg0.data(), b'hello-1') + self.assertEqual(msg0.data(), b"hello-1") reader1.close() # after compact, consumer with `is_read_compacted=False`, expected read 2 messages for same key. 
msg0 = consumer2.receive(TM) - self.assertEqual(msg0.data(), b'hello-0') + self.assertEqual(msg0.data(), b"hello-0") consumer2.acknowledge(msg0) msg1 = consumer2.receive(TM) - self.assertEqual(msg1.data(), b'hello-1') + self.assertEqual(msg1.data(), b"hello-1") consumer2.acknowledge(msg1) consumer2.close() # ditto for reader reader2 = client.create_reader(topic, MessageId.earliest, is_read_compacted=False) msg0 = reader2.read_next(TM) - self.assertEqual(msg0.data(), b'hello-0') + self.assertEqual(msg0.data(), b"hello-0") msg1 = reader2.read_next(TM) - self.assertEqual(msg1.data(), b'hello-1') + self.assertEqual(msg1.data(), b"hello-1") reader2.close() client.close() def test_reader_has_message_available(self): # create client, producer, reader client = Client(self.serviceUrl) - producer = client.create_producer('my-python-topic-reader-has-message-available') - reader = client.create_reader('my-python-topic-reader-has-message-available', - MessageId.latest) + producer = client.create_producer("my-python-topic-reader-has-message-available") + reader = client.create_reader("my-python-topic-reader-has-message-available", MessageId.latest) # before produce data, expected not has message available - self.assertFalse(reader.has_message_available()); + self.assertFalse(reader.has_message_available()) for i in range(10): - producer.send(b'hello-%d' % i) + producer.send(b"hello-%d" % i) # produced data, expected has message available - self.assertTrue(reader.has_message_available()); + self.assertTrue(reader.has_message_available()) for i in range(10): msg = reader.read_next(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello-%d' % i) + self.assertEqual(msg.data(), b"hello-%d" % i) # consumed all data, expected not has message available - self.assertFalse(reader.has_message_available()); + self.assertFalse(reader.has_message_available()) for i in range(10, 20): - producer.send(b'hello-%d' % i) + producer.send(b"hello-%d" % i) # produced data again, expected has 
message available - self.assertTrue(reader.has_message_available()); + self.assertTrue(reader.has_message_available()) reader.close() producer.close() client.close() def test_seek(self): client = Client(self.serviceUrl) - topic = 'my-python-topic-seek-' + str(time.time()) - consumer = client.subscribe(topic, - 'my-sub', - consumer_type=ConsumerType.Shared) + topic = "my-python-topic-seek-" + str(time.time()) + consumer = client.subscribe(topic, "my-sub", consumer_type=ConsumerType.Shared) producer = client.create_producer(topic) for i in range(100): if i > 0: time.sleep(0.02) - producer.send(b'hello-%d' % i) + producer.send(b"hello-%d" % i) ids = [] timestamps = [] for i in range(100): msg = consumer.receive(TM) - self.assertEqual(msg.data(), b'hello-%d' % i) + self.assertEqual(msg.data(), b"hello-%d" % i) ids.append(msg.message_id()) timestamps.append(msg.publish_timestamp()) consumer.acknowledge(msg) @@ -861,19 +849,19 @@ def test_seek(self): consumer.seek(MessageId.earliest) time.sleep(0.5) msg = consumer.receive(TM) - self.assertEqual(msg.data(), b'hello-0') + self.assertEqual(msg.data(), b"hello-0") # seek on messageId consumer.seek(ids[50]) time.sleep(0.5) msg = consumer.receive(TM) - self.assertEqual(msg.data(), b'hello-50') + self.assertEqual(msg.data(), b"hello-50") # ditto, but seek on timestamp consumer.seek(timestamps[42]) time.sleep(0.5) msg = consumer.receive(TM) - self.assertEqual(msg.data(), b'hello-42') + self.assertEqual(msg.data(), b"hello-42") # repeat with reader reader = client.create_reader(topic, MessageId.latest) @@ -884,25 +872,25 @@ def test_seek(self): reader.seek(MessageId.earliest) time.sleep(0.5) msg = reader.read_next(TM) - self.assertEqual(msg.data(), b'hello-0') + self.assertEqual(msg.data(), b"hello-0") msg = reader.read_next(TM) - self.assertEqual(msg.data(), b'hello-1') + self.assertEqual(msg.data(), b"hello-1") # seek on messageId reader.seek(ids[33]) time.sleep(0.5) msg = reader.read_next(TM) - self.assertEqual(msg.data(), 
b'hello-33') + self.assertEqual(msg.data(), b"hello-33") msg = reader.read_next(TM) - self.assertEqual(msg.data(), b'hello-34') + self.assertEqual(msg.data(), b"hello-34") # seek on timestamp reader.seek(timestamps[79]) time.sleep(0.5) msg = reader.read_next(TM) - self.assertEqual(msg.data(), b'hello-79') + self.assertEqual(msg.data(), b"hello-79") msg = reader.read_next(TM) - self.assertEqual(msg.data(), b'hello-80') + self.assertEqual(msg.data(), b"hello-80") reader.close() client.close() @@ -915,15 +903,13 @@ def test_v2_topics_http(self): def _v2_topics(self, url): client = Client(url) - consumer = client.subscribe('my-v2-topic-producer-consumer', - 'my-sub', - consumer_type=ConsumerType.Shared) - producer = client.create_producer('my-v2-topic-producer-consumer') - producer.send(b'hello') + consumer = client.subscribe("my-v2-topic-producer-consumer", "my-sub", consumer_type=ConsumerType.Shared) + producer = client.create_producer("my-v2-topic-producer-consumer") + producer.send(b"hello") msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") consumer.acknowledge(msg) with self.assertRaises(pulsar.Timeout): @@ -933,38 +919,35 @@ def _v2_topics(self, url): def test_topics_consumer(self): client = Client(self.serviceUrl) - topic1 = 'persistent://public/default/my-python-topics-consumer-1' - topic2 = 'persistent://public/default/my-python-topics-consumer-2' - topic3 = 'persistent://public/default-2/my-python-topics-consumer-3' # topic from different namespace + topic1 = "persistent://public/default/my-python-topics-consumer-1" + topic2 = "persistent://public/default/my-python-topics-consumer-2" + topic3 = "persistent://public/default-2/my-python-topics-consumer-3" # topic from different namespace topics = [topic1, topic2, topic3] - url1 = self.adminUrl + '/admin/v2/persistent/public/default/my-python-topics-consumer-1/partitions' - url2 = self.adminUrl + 
'/admin/v2/persistent/public/default/my-python-topics-consumer-2/partitions' - url3 = self.adminUrl + '/admin/v2/persistent/public/default-2/my-python-topics-consumer-3/partitions' + url1 = self.adminUrl + "/admin/v2/persistent/public/default/my-python-topics-consumer-1/partitions" + url2 = self.adminUrl + "/admin/v2/persistent/public/default/my-python-topics-consumer-2/partitions" + url3 = self.adminUrl + "/admin/v2/persistent/public/default-2/my-python-topics-consumer-3/partitions" - doHttpPut(url1, '2') - doHttpPut(url2, '3') - doHttpPut(url3, '4') + doHttpPut(url1, "2") + doHttpPut(url2, "3") + doHttpPut(url3, "4") producer1 = client.create_producer(topic1) producer2 = client.create_producer(topic2) producer3 = client.create_producer(topic3) - consumer = client.subscribe(topics, - 'my-topics-consumer-sub', - consumer_type=ConsumerType.Shared, - receiver_queue_size=10 - ) + consumer = client.subscribe( + topics, "my-topics-consumer-sub", consumer_type=ConsumerType.Shared, receiver_queue_size=10 + ) for i in range(100): - producer1.send(b'hello-1-%d' % i) + producer1.send(b"hello-1-%d" % i) for i in range(100): - producer2.send(b'hello-2-%d' % i) + producer2.send(b"hello-2-%d" % i) for i in range(100): - producer3.send(b'hello-3-%d' % i) - + producer3.send(b"hello-3-%d" % i) for i in range(300): msg = consumer.receive(TM) @@ -976,45 +959,46 @@ def test_topics_consumer(self): def test_topics_pattern_consumer(self): import re + client = Client(self.serviceUrl) - topics_pattern = 'persistent://public/default/my-python-pattern-consumer.*' + topics_pattern = "persistent://public/default/my-python-pattern-consumer.*" - topic1 = 'persistent://public/default/my-python-pattern-consumer-1' - topic2 = 'persistent://public/default/my-python-pattern-consumer-2' - topic3 = 'persistent://public/default/my-python-pattern-consumer-3' + topic1 = "persistent://public/default/my-python-pattern-consumer-1" + topic2 = "persistent://public/default/my-python-pattern-consumer-2" + topic3 
= "persistent://public/default/my-python-pattern-consumer-3" - url1 = self.adminUrl + '/admin/v2/persistent/public/default/my-python-pattern-consumer-1/partitions' - url2 = self.adminUrl + '/admin/v2/persistent/public/default/my-python-pattern-consumer-2/partitions' - url3 = self.adminUrl + '/admin/v2/persistent/public/default/my-python-pattern-consumer-3/partitions' + url1 = self.adminUrl + "/admin/v2/persistent/public/default/my-python-pattern-consumer-1/partitions" + url2 = self.adminUrl + "/admin/v2/persistent/public/default/my-python-pattern-consumer-2/partitions" + url3 = self.adminUrl + "/admin/v2/persistent/public/default/my-python-pattern-consumer-3/partitions" - doHttpPut(url1, '2') - doHttpPut(url2, '3') - doHttpPut(url3, '4') + doHttpPut(url1, "2") + doHttpPut(url2, "3") + doHttpPut(url3, "4") producer1 = client.create_producer(topic1) producer2 = client.create_producer(topic2) producer3 = client.create_producer(topic3) - consumer = client.subscribe(re.compile(topics_pattern), - 'my-pattern-consumer-sub', - consumer_type = ConsumerType.Shared, - receiver_queue_size = 10, - pattern_auto_discovery_period = 1 - ) + consumer = client.subscribe( + re.compile(topics_pattern), + "my-pattern-consumer-sub", + consumer_type=ConsumerType.Shared, + receiver_queue_size=10, + pattern_auto_discovery_period=1, + ) # wait enough time to trigger auto discovery time.sleep(2) for i in range(100): - producer1.send(b'hello-1-%d' % i) + producer1.send(b"hello-1-%d" % i) for i in range(100): - producer2.send(b'hello-2-%d' % i) + producer2.send(b"hello-2-%d" % i) for i in range(100): - producer3.send(b'hello-3-%d' % i) - + producer3.send(b"hello-3-%d" % i) for i in range(300): msg = consumer.receive(TM) @@ -1033,70 +1017,72 @@ def test_message_id(self): def test_get_topics_partitions(self): client = Client(self.serviceUrl) - topic_partitioned = 'persistent://public/default/test_get_topics_partitions' - topic_non_partitioned = 
'persistent://public/default/test_get_topics_not-partitioned' - - url1 = self.adminUrl + '/admin/v2/persistent/public/default/test_get_topics_partitions/partitions' - doHttpPut(url1, '3') - - self.assertEqual(client.get_topic_partitions(topic_partitioned), - ['persistent://public/default/test_get_topics_partitions-partition-0', - 'persistent://public/default/test_get_topics_partitions-partition-1', - 'persistent://public/default/test_get_topics_partitions-partition-2']) + topic_partitioned = "persistent://public/default/test_get_topics_partitions" + topic_non_partitioned = "persistent://public/default/test_get_topics_not-partitioned" + + url1 = self.adminUrl + "/admin/v2/persistent/public/default/test_get_topics_partitions/partitions" + doHttpPut(url1, "3") + + self.assertEqual( + client.get_topic_partitions(topic_partitioned), + [ + "persistent://public/default/test_get_topics_partitions-partition-0", + "persistent://public/default/test_get_topics_partitions-partition-1", + "persistent://public/default/test_get_topics_partitions-partition-2", + ], + ) - self.assertEqual(client.get_topic_partitions(topic_non_partitioned), - [topic_non_partitioned]) + self.assertEqual(client.get_topic_partitions(topic_non_partitioned), [topic_non_partitioned]) client.close() def test_token_auth(self): - with open('/tmp/pulsar-test-data/tokens/token.txt') as tf: + with open("/tmp/pulsar-test-data/tokens/token.txt") as tf: token = tf.read().strip() # Use adminUrl to test both HTTP request and binary protocol - client = Client(self.adminUrl, - authentication=AuthenticationToken(token)) + client = Client(self.adminUrl, authentication=AuthenticationToken(token)) - consumer = client.subscribe('persistent://private/auth/my-python-topic-token-auth', - 'my-sub', - consumer_type=ConsumerType.Shared) - producer = client.create_producer('persistent://private/auth/my-python-topic-token-auth') - producer.send(b'hello') + consumer = client.subscribe( + 
"persistent://private/auth/my-python-topic-token-auth", "my-sub", consumer_type=ConsumerType.Shared + ) + producer = client.create_producer("persistent://private/auth/my-python-topic-token-auth") + producer.send(b"hello") msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") client.close() def test_token_auth_supplier(self): def read_token(): - with open('/tmp/pulsar-test-data/tokens/token.txt') as tf: + with open("/tmp/pulsar-test-data/tokens/token.txt") as tf: return tf.read().strip() - client = Client(self.serviceUrl, - authentication=AuthenticationToken(read_token)) - consumer = client.subscribe('persistent://private/auth/my-python-topic-token-auth', - 'my-sub', - consumer_type=ConsumerType.Shared) - producer = client.create_producer('persistent://private/auth/my-python-topic-token-auth') - producer.send(b'hello') + client = Client(self.serviceUrl, authentication=AuthenticationToken(read_token)) + consumer = client.subscribe( + "persistent://private/auth/my-python-topic-token-auth", "my-sub", consumer_type=ConsumerType.Shared + ) + producer = client.create_producer("persistent://private/auth/my-python-topic-token-auth") + producer.send(b"hello") msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") client.close() def test_producer_consumer_zstd(self): client = Client(self.serviceUrl) - consumer = client.subscribe('my-python-topic-producer-consumer-zstd', - 'my-sub', - consumer_type=ConsumerType.Shared) - producer = client.create_producer('my-python-topic-producer-consumer-zstd', - compression_type=CompressionType.ZSTD) - producer.send(b'hello') + consumer = client.subscribe( + "my-python-topic-producer-consumer-zstd", "my-sub", consumer_type=ConsumerType.Shared + ) + producer = client.create_producer( + "my-python-topic-producer-consumer-zstd", compression_type=CompressionType.ZSTD + ) + producer.send(b"hello") 
msg = consumer.receive(TM) self.assertTrue(msg) - self.assertEqual(msg.data(), b'hello') + self.assertEqual(msg.data(), b"hello") with self.assertRaises(pulsar.Timeout): consumer.receive(100) @@ -1107,41 +1093,46 @@ def test_producer_consumer_zstd(self): def test_client_reference_deleted(self): def get_producer(): cl = Client(self.serviceUrl) - return cl.create_producer(topic='foobar') + return cl.create_producer(topic="foobar") producer = get_producer() - producer.send(b'test_payload') + producer.send(b"test_payload") ##### def test_get_topic_name(self): client = Client(self.serviceUrl) - consumer = client.subscribe('persistent://public/default/topic_name_test', - 'topic_name_test_sub', - consumer_type=ConsumerType.Shared) - producer = client.create_producer('persistent://public/default/topic_name_test') - producer.send(b'hello') + consumer = client.subscribe( + "persistent://public/default/topic_name_test", "topic_name_test_sub", consumer_type=ConsumerType.Shared + ) + producer = client.create_producer("persistent://public/default/topic_name_test") + producer.send(b"hello") msg = consumer.receive(TM) - self.assertEqual(msg.topic_name(), 'persistent://public/default/topic_name_test') + self.assertEqual(msg.topic_name(), "persistent://public/default/topic_name_test") client.close() def test_get_partitioned_topic_name(self): client = Client(self.serviceUrl) - url1 = self.adminUrl + '/admin/v2/persistent/public/default/partitioned_topic_name_test/partitions' - doHttpPut(url1, '3') - - partitions = ['persistent://public/default/partitioned_topic_name_test-partition-0', - 'persistent://public/default/partitioned_topic_name_test-partition-1', - 'persistent://public/default/partitioned_topic_name_test-partition-2'] - self.assertEqual(client.get_topic_partitions('persistent://public/default/partitioned_topic_name_test'), - partitions) + url1 = self.adminUrl + "/admin/v2/persistent/public/default/partitioned_topic_name_test/partitions" + doHttpPut(url1, "3") + + partitions 
= [ + "persistent://public/default/partitioned_topic_name_test-partition-0", + "persistent://public/default/partitioned_topic_name_test-partition-1", + "persistent://public/default/partitioned_topic_name_test-partition-2", + ] + self.assertEqual( + client.get_topic_partitions("persistent://public/default/partitioned_topic_name_test"), partitions + ) - consumer = client.subscribe('persistent://public/default/partitioned_topic_name_test', - 'partitioned_topic_name_test_sub', - consumer_type=ConsumerType.Shared) - producer = client.create_producer('persistent://public/default/partitioned_topic_name_test') - producer.send(b'hello') + consumer = client.subscribe( + "persistent://public/default/partitioned_topic_name_test", + "partitioned_topic_name_test_sub", + consumer_type=ConsumerType.Shared, + ) + producer = client.create_producer("persistent://public/default/partitioned_topic_name_test") + producer.send(b"hello") msg = consumer.receive(TM) self.assertTrue(msg.topic_name() in partitions) @@ -1149,12 +1140,12 @@ def test_get_partitioned_topic_name(self): def test_shutdown_client(self): client = Client(self.serviceUrl) - producer = client.create_producer('persistent://public/default/partitioned_topic_name_test') - producer.send(b'hello') + producer = client.create_producer("persistent://public/default/partitioned_topic_name_test") + producer.send(b"hello") client.shutdown() try: - producer.send(b'hello') + producer.send(b"hello") self.assertTrue(False) except pulsar.PulsarException: # Expected @@ -1162,14 +1153,12 @@ def test_shutdown_client(self): def test_negative_acks(self): client = Client(self.serviceUrl) - consumer = client.subscribe('test_negative_acks', - 'test', - schema=pulsar.schema.StringSchema(), - negative_ack_redelivery_delay_ms=1000) - producer = client.create_producer('test_negative_acks', - schema=pulsar.schema.StringSchema()) + consumer = client.subscribe( + "test_negative_acks", "test", schema=pulsar.schema.StringSchema(), 
negative_ack_redelivery_delay_ms=1000 + ) + producer = client.create_producer("test_negative_acks", schema=pulsar.schema.StringSchema()) for i in range(10): - producer.send_async('hello-%d' % i, callback=None) + producer.send_async("hello-%d" % i, callback=None) producer.flush() @@ -1189,20 +1178,28 @@ def test_negative_acks(self): def test_connect_timeout(self): client = pulsar.Client( - service_url='pulsar://192.0.2.1:1234', - connection_timeout_ms=1000, # 1 second + service_url="pulsar://192.0.2.1:1234", + connection_timeout_ms=1000, # 1 second ) t1 = time.time() try: - producer = client.create_producer('test_connect_timeout') - self.fail('create_producer should not succeed') + producer = client.create_producer("test_connect_timeout") + self.fail("create_producer should not succeed") except pulsar.ConnectError as expected: - print('expected error: {} when create producer'.format(expected)) + print("expected error: {} when create producer".format(expected)) t2 = time.time() self.assertGreater(t2 - t1, 1.0) - self.assertLess(t2 - t1, 1.5) # 1.5 seconds is long enough + self.assertLess(t2 - t1, 1.5) # 1.5 seconds is long enough client.close() + def test_json_schema_encode(self): + schema = JsonSchema(TestRecord) + record = TestRecord(a=1, b=2) + # Ensure that encoding a JsonSchema more than once works and produces the same result + first_encode = schema.encode(record) + second_encode = schema.encode(record) + self.assertEqual(first_encode, second_encode) + def _check_value_error(self, fun): with self.assertRaises(ValueError): fun() @@ -1212,5 +1209,5 @@ def _check_type_error(self, fun): fun() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/pulsar-client-cpp/python/schema_test.py b/pulsar-client-cpp/python/schema_test.py index 7adbcbe50e852..077f2bb076387 100755 --- a/pulsar-client-cpp/python/schema_test.py +++ b/pulsar-client-cpp/python/schema_test.py @@ -25,6 +25,7 @@ from pulsar.schema import * from enum import Enum import json +from 
fastavro.schema import load_schema class SchemaTest(TestCase): @@ -48,6 +49,7 @@ class Example(Record): g = Double() h = Bytes() i = Map(String()) + j = CustomEnum(Color) fastavro.parse_schema(Example.schema()) self.assertEqual(Example.schema(), { @@ -73,16 +75,23 @@ class Example(Record): {"name": "i", "type": ["null", { "type": "map", "values": "string"}] - }, + }, + {"name": "j", "type": ["null", "Color"]} ] }) def test_complex(self): + class Color(Enum): + red = 1 + green = 2 + blue = 3 + class MySubRecord(Record): _sorted_fields = True x = Integer() y = Long() z = String() + color = CustomEnum(Color) class Example(Record): _sorted_fields = True @@ -100,9 +109,12 @@ class Example(Record): "type": ["null", { "name": "MySubRecord", "type": "record", - "fields": [{"name": "x", "type": ["null", "int"]}, - {"name": "y", "type": ["null", "long"]}, - {"name": "z", "type": ["null", "string"]}] + "fields": [ + {'name': 'color', 'type': ['null', {'type': 'enum', 'name': 'Color', 'symbols': + ['red', 'green', 'blue']}]}, + {"name": "x", "type": ["null", "int"]}, + {"name": "y", "type": ["null", "long"]}, + {"name": "z", "type": ["null", "string"]}] }] }, {"name": "sub2", @@ -629,6 +641,8 @@ class MyEnum(Enum): class Example(Record): name = String() v = MyEnum + w = CustomEnum(MyEnum) + x = CustomEnum(MyEnum, required=True, default=MyEnum.A, required_default=True) topic = 'my-json-enum-topic' @@ -640,13 +654,15 @@ class Example(Record): consumer = client.subscribe(topic, 'test', schema=JsonSchema(Example)) - r = Example(name='test', v=MyEnum.C) + r = Example(name='test', v=MyEnum.C, w=MyEnum.B) producer.send(r) msg = consumer.receive() self.assertEqual('test', msg.value().name) self.assertEqual(MyEnum.C, MyEnum(msg.value().v)) + self.assertEqual(MyEnum.B, MyEnum(msg.value().w)) + self.assertEqual(MyEnum.A, MyEnum(msg.value().x)) client.close() def test_avro_enum(self): @@ -658,6 +674,8 @@ class MyEnum(Enum): class Example(Record): name = String() v = MyEnum + w = 
CustomEnum(MyEnum) + x = CustomEnum(MyEnum, required=True, default=MyEnum.B, required_default=True) topic = 'my-avro-enum-topic' @@ -669,12 +687,14 @@ class Example(Record): consumer = client.subscribe(topic, 'test', schema=AvroSchema(Example)) - r = Example(name='test', v=MyEnum.C) + r = Example(name='test', v=MyEnum.C, w=MyEnum.A) producer.send(r) msg = consumer.receive() msg.value() self.assertEqual(MyEnum.C, msg.value().v) + self.assertEqual(MyEnum.A, MyEnum(msg.value().w)) + self.assertEqual(MyEnum.B, MyEnum(msg.value().x)) client.close() def test_avro_map_array(self): @@ -912,6 +932,11 @@ class MyRecord(Record): client.close() def test_serialize_schema_complex(self): + class Color(Enum): + red = 1 + green = 2 + blue = 3 + class NestedObj1(Record): _sorted_fields = True na1 = String() @@ -924,6 +949,8 @@ class NestedObj2(Record): nc2 = NestedObj1() class NestedObj3(Record): + _sorted_fields = True + color = CustomEnum(Color) na3 = Integer() class NestedObj4(Record): @@ -932,11 +959,6 @@ class NestedObj4(Record): na4 = String() nb4 = Integer() - class Color(Enum): - red = 1 - green = 2 - blue = 3 - class ComplexRecord(Record): _avro_namespace = 'xxx.xxx' _sorted_fields = True @@ -944,6 +966,7 @@ class ComplexRecord(Record): b = Integer() color = Color color2 = Color + color3 = CustomEnum(Color, required=True, default=Color.red, required_default=True) nested = NestedObj2() nested2 = NestedObj2() mapNested = Map(NestedObj3()) @@ -969,8 +992,10 @@ class ComplexRecord(Record): {'name': 'color', 'type': ['null', {'type': 'enum', 'name': 'Color', 'symbols': [ 'red', 'green', 'blue']}]}, {'name': 'color2', 'type': ['null', 'Color']}, + {'name': 'color3', 'default': 'red', 'type': 'Color'}, {'name': 'mapNested', 'type': ['null', {'type': 'map', 'values': {'name': 'NestedObj3', 'type': 'record', 'fields': [ + {'name': 'color', 'type': ['null', 'Color']}, {'name': 'na3', 'type': ['null', 'int']} ]}} ]}, @@ -997,12 +1022,12 @@ def encode_and_decode(schema_type): r = 
ComplexRecord(a=1, b=2, color=Color.red, color2=Color.blue, nested=nested_obj2, nested2=nested_obj2, mapNested={ - 'a': NestedObj3(na3=1), + 'a': NestedObj3(na3=1, color=Color.green), 'b': NestedObj3(na3=2), - 'c': NestedObj3(na3=3) + 'c': NestedObj3(na3=3, color=Color.red) }, mapNested2={ - 'd': NestedObj3(na3=4), - 'e': NestedObj3(na3=5), + 'd': NestedObj3(na3=4, color=Color.red), + 'e': NestedObj3(na3=5, color=Color.blue), 'f': NestedObj3(na3=6) }, arrayNested=[ NestedObj4(na4='value na4 1', nb4=100), @@ -1016,32 +1041,9 @@ def encode_and_decode(schema_type): data_decode = data_schema.decode(data_encode) self.assertEqual(data_decode.__class__.__name__, 'ComplexRecord') self.assertEqual(data_decode, r) - self.assertEqual(data_decode.a, 1) - self.assertEqual(data_decode.b, 2) - self.assertEqual(data_decode.color, Color.red) - self.assertEqual(data_decode.color2, Color.blue) - self.assertEqual(data_decode.nested.na2, 22) - self.assertEqual(data_decode.nested.nb2, True) - self.assertEqual(data_decode.nested.nc2.na1, 'na1 value') - self.assertEqual(data_decode.nested.nc2.nb1, 20.5) - self.assertEqual(data_decode.nested2.na2, 22) - self.assertEqual(data_decode.nested2.nb2, True) - self.assertEqual(data_decode.nested2.nc2.na1, 'na1 value') - self.assertEqual(data_decode.nested2.nc2.nb1, 20.5) - self.assertEqual(data_decode.mapNested['a'].na3, 1) - self.assertEqual(data_decode.mapNested['b'].na3, 2) - self.assertEqual(data_decode.mapNested['c'].na3, 3) - self.assertEqual(data_decode.mapNested2['d'].na3, 4) - self.assertEqual(data_decode.mapNested2['e'].na3, 5) - self.assertEqual(data_decode.mapNested2['f'].na3, 6) - self.assertEqual(data_decode.arrayNested[0].na4, 'value na4 1') - self.assertEqual(data_decode.arrayNested[0].nb4, 100) - self.assertEqual(data_decode.arrayNested[1].na4, 'value na4 2') - self.assertEqual(data_decode.arrayNested[1].nb4, 200) - self.assertEqual(data_decode.arrayNested2[0].na4, 'value na4 3') - self.assertEqual(data_decode.arrayNested2[0].nb4, 
300) - self.assertEqual(data_decode.arrayNested2[1].na4, 'value na4 4') - self.assertEqual(data_decode.arrayNested2[1].nb4, 400) + self.assertEqual(r.color3, Color.red) + self.assertEqual(r.mapNested['a'].color, Color.green) + self.assertEqual(r.mapNested['b'].color, None) print('Encode and decode complex schema finish. schema_type: ', schema_type) encode_and_decode('avro') @@ -1068,8 +1070,12 @@ class NestedObj2(Record): self.assertEqual(data_decode.na2, 1) self.assertTrue(data_decode.nb2) - def test_produce_and_consume_complex_schema_data(self): + class Color(Enum): + red = 1 + green = 2 + blue = 3 + class NestedObj1(Record): na1 = String() nb1 = Double() @@ -1081,6 +1087,7 @@ class NestedObj2(Record): class NestedObj3(Record): na3 = Integer() + color = CustomEnum(Color, required=True, required_default=True, default=Color.blue) class NestedObj4(Record): na4 = String() @@ -1089,6 +1096,7 @@ class NestedObj4(Record): class ComplexRecord(Record): a = Integer() b = Integer() + color = CustomEnum(Color) nested = NestedObj2() mapNested = Map(NestedObj3()) arrayNested = Array(NestedObj4()) @@ -1111,8 +1119,8 @@ def produce_consume_test(schema_type): nested_obj1 = NestedObj1(na1='na1 value', nb1=20.5) nested_obj2 = NestedObj2(na2=22, nb2=True, nc2=nested_obj1) r = ComplexRecord(a=1, b=2, nested=nested_obj2, mapNested={ - 'a': NestedObj3(na3=1), - 'b': NestedObj3(na3=2), + 'a': NestedObj3(na3=1, color=Color.red), + 'b': NestedObj3(na3=2, color=Color.green), 'c': NestedObj3(na3=3) }, arrayNested=[ NestedObj4(na4='value na4 1', nb4=100), @@ -1124,19 +1132,6 @@ def produce_consume_test(schema_type): value = msg.value() self.assertEqual(value.__class__.__name__, 'ComplexRecord') self.assertEqual(value, r) - self.assertEqual(value.a, 1) - self.assertEqual(value.b, 2) - self.assertEqual(value.nested.na2, 22) - self.assertEqual(value.nested.nb2, True) - self.assertEqual(value.nested.nc2.na1, 'na1 value') - self.assertEqual(value.nested.nc2.nb1, 20.5) - 
self.assertEqual(value.mapNested['a'].na3, 1) - self.assertEqual(value.mapNested['b'].na3, 2) - self.assertEqual(value.mapNested['c'].na3, 3) - self.assertEqual(value.arrayNested[0].na4, 'value na4 1') - self.assertEqual(value.arrayNested[0].nb4, 100) - self.assertEqual(value.arrayNested[1].na4, 'value na4 2') - self.assertEqual(value.arrayNested[1].nb4, 200) print('Produce and consume complex schema data finish. schema_type', schema_type) @@ -1145,12 +1140,131 @@ def produce_consume_test(schema_type): client.close() - def test(self): - class NamespaceDemo(Record): - _namespace = 'xxx.xxx.xxx' - x = String() - y = Integer() - print('schema: ', NamespaceDemo.schema()) + def custom_schema_test(self): + + def encode_and_decode(schema_definition): + avro_schema = AvroSchema(None, schema_definition=schema_definition) + + company = { + "name": "company-name", + "address": 'xxx road xxx street', + "employees": [ + {"name": "user1", "age": 25}, + {"name": "user2", "age": 30}, + {"name": "user3", "age": 35}, + ], + "labels": { + "industry": "software", + "scale": ">100", + "funds": "1000000.0" + }, + "companyType": "companyType1" + } + data = avro_schema.encode(company) + company_decode = avro_schema.decode(data) + self.assertEqual(company, company_decode) + + schema_definition = { + 'doc': 'this is doc', + 'namespace': 'example.avro', + 'type': 'record', + 'name': 'Company', + 'fields': [ + {'name': 'name', 'type': ['null', 'string']}, + {'name': 'address', 'type': ['null', 'string']}, + {'name': 'employees', 'type': ['null', {'type': 'array', 'items': { + 'type': 'record', + 'name': 'Employee', + 'fields': [ + {'name': 'name', 'type': ['null', 'string']}, + {'name': 'age', 'type': ['null', 'int']} + ] + }}]}, + {'name': 'labels', 'type': ['null', {'type': 'map', 'values': 'string'}]}, + {'name': 'companyType', 'type': ['null', {'type': 'enum', 'name': 'CompanyType', 'symbols': + ['companyType1', 'companyType2', 'companyType3']}]} + ] + } + 
encode_and_decode(schema_definition) + # Users could load schema from file by `fastavro.schema` + # Or use `avro.schema` like this `avro.schema.parse(open("examples/company.avsc", "rb").read()).to_json()` + encode_and_decode(load_schema("examples/company.avsc")) + + def custom_schema_produce_and_consume_test(self): + client = pulsar.Client(self.serviceUrl) + + def produce_and_consume(topic, schema_definition): + print('custom schema produce and consume test topic - ', topic) + example_avro_schema = AvroSchema(None, schema_definition=schema_definition) + + producer = client.create_producer( + topic=topic, + schema=example_avro_schema) + consumer = client.subscribe(topic, 'test', schema=example_avro_schema) + + for i in range(0, 10): + company = { + "name": "company-name" + str(i), + "address": 'xxx road xxx street ' + str(i), + "employees": [ + {"name": "user" + str(i), "age": 20 + i}, + {"name": "user" + str(i), "age": 30 + i}, + {"name": "user" + str(i), "age": 35 + i}, + ], + "labels": { + "industry": "software" + str(i), + "scale": ">100", + "funds": "1000000.0" + }, + "companyType": "companyType" + str((i % 3) + 1) + } + producer.send(company) + + for i in range(0, 10): + msg = consumer.receive() + company = { + "name": "company-name" + str(i), + "address": 'xxx road xxx street ' + str(i), + "employees": [ + {"name": "user" + str(i), "age": 20 + i}, + {"name": "user" + str(i), "age": 30 + i}, + {"name": "user" + str(i), "age": 35 + i}, + ], + "labels": { + "industry": "software" + str(i), + "scale": ">100", + "funds": "1000000.0" + } + } + self.assertEqual(msg.value(), company) + consumer.acknowledge(msg) + + consumer.close() + producer.close() + + schema_definition = { + 'doc': 'this is doc', + 'namespace': 'example.avro', + 'type': 'record', + 'name': 'Company', + 'fields': [ + {'name': 'name', 'type': ['null', 'string']}, + {'name': 'address', 'type': ['null', 'string']}, + {'name': 'employees', 'type': ['null', {'type': 'array', 'items': { + 'type': 
'record', + 'name': 'Employee', + 'fields': [ + {'name': 'name', 'type': ['null', 'string']}, + {'name': 'age', 'type': ['null', 'int']} + ] + }}]}, + {'name': 'labels', 'type': ['null', {'type': 'map', 'values': 'string'}]} + ] + } + produce_and_consume('custom-schema-test-1', schema_definition=schema_definition) + produce_and_consume('custom-schema-test-2', schema_definition=load_schema("examples/company.avsc")) + + client.close() if __name__ == '__main__': main() diff --git a/pulsar-client-cpp/python/setup.py b/pulsar-client-cpp/python/setup.py index d012dfc13df59..3f172fe3d212a 100644 --- a/pulsar-client-cpp/python/setup.py +++ b/pulsar-client-cpp/python/setup.py @@ -82,7 +82,7 @@ def build_extension(self, ext): # functions dependencies extras_require["functions"] = sorted( { - "protobuf>=3.6.1", + "protobuf>=3.6.1,<=3.20.*", "grpcio<1.28,>=1.8.2", "apache-bookkeeper-client>=4.9.2", "prometheus_client", diff --git a/pulsar-client-cpp/python/src/authentication.cc b/pulsar-client-cpp/python/src/authentication.cc index e236c7e0843c5..920a7174b47bb 100644 --- a/pulsar-client-cpp/python/src/authentication.cc +++ b/pulsar-client-cpp/python/src/authentication.cc @@ -26,8 +26,8 @@ AuthenticationWrapper::AuthenticationWrapper(const std::string& dynamicLibPath, } struct AuthenticationTlsWrapper : public AuthenticationWrapper { - AuthenticationTlsWrapper(const std::string& certificatePath, const std::string& privateKeyPath) : - AuthenticationWrapper() { + AuthenticationTlsWrapper(const std::string& certificatePath, const std::string& privateKeyPath) + : AuthenticationWrapper() { this->auth = AuthTls::create(certificatePath, privateKeyPath); } }; @@ -35,13 +35,10 @@ struct AuthenticationTlsWrapper : public AuthenticationWrapper { struct TokenSupplierWrapper { PyObject* _pySupplier; - TokenSupplierWrapper(py::object pySupplier) : - _pySupplier(pySupplier.ptr()) { - Py_XINCREF(_pySupplier); - } + TokenSupplierWrapper(py::object pySupplier) : _pySupplier(pySupplier.ptr()) { 
Py_XINCREF(_pySupplier); } TokenSupplierWrapper(const TokenSupplierWrapper& other) { - _pySupplier= other._pySupplier; + _pySupplier = other._pySupplier; Py_XINCREF(_pySupplier); } @@ -51,9 +48,7 @@ struct TokenSupplierWrapper { return *this; } - virtual ~TokenSupplierWrapper() { - Py_XDECREF(_pySupplier); - } + virtual ~TokenSupplierWrapper() { Py_XDECREF(_pySupplier); } std::string operator()() { PyGILState_STATE state = PyGILState_Ensure(); @@ -61,7 +56,7 @@ struct TokenSupplierWrapper { std::string token; try { token = py::call(_pySupplier); - } catch(const py::error_already_set& e) { + } catch (const py::error_already_set& e) { PyErr_Print(); } @@ -70,10 +65,8 @@ struct TokenSupplierWrapper { } }; - struct AuthenticationTokenWrapper : public AuthenticationWrapper { - AuthenticationTokenWrapper(py::object token) : - AuthenticationWrapper() { + AuthenticationTokenWrapper(py::object token) : AuthenticationWrapper() { if (py::extract(token).check()) { // It's a string std::string tokenStr = py::extract(token); @@ -86,15 +79,13 @@ struct AuthenticationTokenWrapper : public AuthenticationWrapper { }; struct AuthenticationAthenzWrapper : public AuthenticationWrapper { - AuthenticationAthenzWrapper(const std::string& authParamsString) : - AuthenticationWrapper() { + AuthenticationAthenzWrapper(const std::string& authParamsString) : AuthenticationWrapper() { this->auth = AuthAthenz::create(authParamsString); } }; struct AuthenticationOauth2Wrapper : public AuthenticationWrapper { - AuthenticationOauth2Wrapper(const std::string& authParamsString) : - AuthenticationWrapper() { + AuthenticationOauth2Wrapper(const std::string& authParamsString) : AuthenticationWrapper() { this->auth = AuthOauth2::create(authParamsString); } }; @@ -102,22 +93,17 @@ struct AuthenticationOauth2Wrapper : public AuthenticationWrapper { void export_authentication() { using namespace boost::python; - class_("Authentication", init()) - ; + class_("Authentication", init()); - class_ 
>("AuthenticationTLS", - init()) - ; + class_ >( + "AuthenticationTLS", init()); class_ >("AuthenticationToken", - init()) - ; + init()); class_ >("AuthenticationAthenz", - init()) - ; + init()); class_ >("AuthenticationOauth2", - init()) - ; + init()); } diff --git a/pulsar-client-cpp/python/src/client.cc b/pulsar-client-cpp/python/src/client.cc index 3dcbf7fc831a0..445a6dcfa0c37 100644 --- a/pulsar-client-cpp/python/src/client.cc +++ b/pulsar-client-cpp/python/src/client.cc @@ -22,11 +22,10 @@ Producer Client_createProducer(Client& client, const std::string& topic, const P Producer producer; Result res; - Py_BEGIN_ALLOW_THREADS - res = client.createProducer(topic, conf, producer); + Py_BEGIN_ALLOW_THREADS res = client.createProducer(topic, conf, producer); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); return producer; } @@ -35,11 +34,10 @@ Consumer Client_subscribe(Client& client, const std::string& topic, const std::s Consumer consumer; Result res; - Py_BEGIN_ALLOW_THREADS - res = client.subscribe(topic, subscriptionName, conf, consumer); + Py_BEGIN_ALLOW_THREADS res = client.subscribe(topic, subscriptionName, conf, consumer); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); return consumer; } @@ -50,43 +48,39 @@ Consumer Client_subscribe_topics(Client& client, boost::python::list& topics, std::vector topics_vector; - for (int i = 0; i < len(topics); i ++) { + for (int i = 0; i < len(topics); i++) { std::string content = boost::python::extract(topics[i]); topics_vector.push_back(content); } - Py_BEGIN_ALLOW_THREADS - res = client.subscribe(topics_vector, subscriptionName, conf, consumer); + Py_BEGIN_ALLOW_THREADS res = client.subscribe(topics_vector, subscriptionName, conf, consumer); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); return consumer; } -Consumer Client_subscribe_pattern(Client& client, const std::string& topic_pattern, const std::string& subscriptionName, - const ConsumerConfiguration& conf) { 
+Consumer Client_subscribe_pattern(Client& client, const std::string& topic_pattern, + const std::string& subscriptionName, const ConsumerConfiguration& conf) { Consumer consumer; Result res; - Py_BEGIN_ALLOW_THREADS - res = client.subscribeWithRegex(topic_pattern, subscriptionName, conf, consumer); + Py_BEGIN_ALLOW_THREADS res = client.subscribeWithRegex(topic_pattern, subscriptionName, conf, consumer); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); return consumer; } -Reader Client_createReader(Client& client, const std::string& topic, - const MessageId& startMessageId, +Reader Client_createReader(Client& client, const std::string& topic, const MessageId& startMessageId, const ReaderConfiguration& conf) { Reader reader; Result res; - Py_BEGIN_ALLOW_THREADS - res = client.createReader(topic, startMessageId, conf, reader); + Py_BEGIN_ALLOW_THREADS res = client.createReader(topic, startMessageId, conf, reader); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); return reader; } @@ -94,11 +88,10 @@ boost::python::list Client_getTopicPartitions(Client& client, const std::string& std::vector partitions; Result res; - Py_BEGIN_ALLOW_THREADS - res = client.getPartitionsForTopic(topic, partitions); + Py_BEGIN_ALLOW_THREADS res = client.getPartitionsForTopic(topic, partitions); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); boost::python::list pyList; for (int i = 0; i < partitions.size(); i++) { @@ -111,24 +104,22 @@ boost::python::list Client_getTopicPartitions(Client& client, const std::string& void Client_close(Client& client) { Result res; - Py_BEGIN_ALLOW_THREADS - res = client.close(); + Py_BEGIN_ALLOW_THREADS res = client.close(); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); } void export_client() { using namespace boost::python; - class_("Client", init()) - .def("create_producer", &Client_createProducer) - .def("subscribe", &Client_subscribe) - .def("subscribe_topics", &Client_subscribe_topics) - 
.def("subscribe_pattern", &Client_subscribe_pattern) - .def("create_reader", &Client_createReader) - .def("get_topic_partitions", &Client_getTopicPartitions) - .def("close", &Client_close) - .def("shutdown", &Client::shutdown) - ; + class_("Client", init()) + .def("create_producer", &Client_createProducer) + .def("subscribe", &Client_subscribe) + .def("subscribe_topics", &Client_subscribe_topics) + .def("subscribe_pattern", &Client_subscribe_pattern) + .def("create_reader", &Client_createReader) + .def("get_topic_partitions", &Client_getTopicPartitions) + .def("close", &Client_close) + .def("shutdown", &Client::shutdown); } diff --git a/pulsar-client-cpp/python/src/config.cc b/pulsar-client-cpp/python/src/config.cc index a2872481b10da..2dee1a1183d81 100644 --- a/pulsar-client-cpp/python/src/config.cc +++ b/pulsar-client-cpp/python/src/config.cc @@ -21,14 +21,11 @@ #include "lib/Utils.h" #include -template +template struct ListenerWrapper { PyObject* _pyListener; - ListenerWrapper(py::object pyListener) : - _pyListener(pyListener.ptr()) { - Py_XINCREF(_pyListener); - } + ListenerWrapper(py::object pyListener) : _pyListener(pyListener.ptr()) { Py_XINCREF(_pyListener); } ListenerWrapper(const ListenerWrapper& other) { _pyListener = other._pyListener; @@ -41,9 +38,7 @@ struct ListenerWrapper { return *this; } - virtual ~ListenerWrapper() { - Py_XDECREF(_pyListener); - } + virtual ~ListenerWrapper() { Py_XDECREF(_pyListener); } void operator()(T consumer, const Message& msg) { PyGILState_STATE state = PyGILState_Ensure(); @@ -65,7 +60,7 @@ static ConsumerConfiguration& ConsumerConfiguration_setMessageListener(ConsumerC } static ReaderConfiguration& ReaderConfiguration_setReaderListener(ReaderConfiguration& conf, - py::object pyListener) { + py::object pyListener) { conf.setReaderListener(ListenerWrapper(pyListener)); return conf; } @@ -78,41 +73,36 @@ static ClientConfiguration& ClientConfiguration_setAuthentication(ClientConfigur } static ConsumerConfiguration& 
ConsumerConfiguration_setCryptoKeyReader(ConsumerConfiguration& conf, - py::object cryptoKeyReader) { + py::object cryptoKeyReader) { CryptoKeyReaderWrapper cryptoKeyReaderWrapper = py::extract(cryptoKeyReader); conf.setCryptoKeyReader(cryptoKeyReaderWrapper.cryptoKeyReader); return conf; } static ProducerConfiguration& ProducerConfiguration_setCryptoKeyReader(ProducerConfiguration& conf, - py::object cryptoKeyReader) { + py::object cryptoKeyReader) { CryptoKeyReaderWrapper cryptoKeyReaderWrapper = py::extract(cryptoKeyReader); conf.setCryptoKeyReader(cryptoKeyReaderWrapper.cryptoKeyReader); return conf; } static ReaderConfiguration& ReaderConfiguration_setCryptoKeyReader(ReaderConfiguration& conf, - py::object cryptoKeyReader) { + py::object cryptoKeyReader) { CryptoKeyReaderWrapper cryptoKeyReaderWrapper = py::extract(cryptoKeyReader); conf.setCryptoKeyReader(cryptoKeyReaderWrapper.cryptoKeyReader); return conf; } -class LoggerWrapper: public Logger { +class LoggerWrapper : public Logger { PyObject* const _pyLogger; const int _pythonLogLevel; const std::unique_ptr _fallbackLogger; - static constexpr int _getLogLevelValue(Level level) { - return 10 + (level * 10); - } + static constexpr int _getLogLevelValue(Level level) { return 10 + (level * 10); } public: - LoggerWrapper(PyObject* pyLogger, int pythonLogLevel, Logger* fallbackLogger) - : _pyLogger(pyLogger), - _pythonLogLevel(pythonLogLevel), - _fallbackLogger(fallbackLogger) { + : _pyLogger(pyLogger), _pythonLogLevel(pythonLogLevel), _fallbackLogger(fallbackLogger) { Py_XINCREF(_pyLogger); } @@ -121,13 +111,9 @@ class LoggerWrapper: public Logger { LoggerWrapper& operator=(const LoggerWrapper&) = delete; LoggerWrapper& operator=(LoggerWrapper&&) = delete; - virtual ~LoggerWrapper() { - Py_XDECREF(_pyLogger); - } + virtual ~LoggerWrapper() { Py_XDECREF(_pyLogger); } - bool isEnabled(Level level) { - return _getLogLevelValue(level) >= _pythonLogLevel; - } + bool isEnabled(Level level) { return 
_getLogLevelValue(level) >= _pythonLogLevel; } void log(Level level, int line, const std::string& message) { if (!Py_IsInitialized()) { @@ -187,11 +173,9 @@ class LoggerWrapperFactory : public LoggerFactory { initializePythonLogLevel(); } - virtual ~LoggerWrapperFactory() { - Py_XDECREF(_pyLogger); - } + virtual ~LoggerWrapperFactory() { Py_XDECREF(_pyLogger); } - Logger* getLogger(const std::string &fileName) { + Logger* getLogger(const std::string& fileName) { const auto fallbackLogger = _fallbackLoggerFactory->getLogger(fileName); if (_pythonLogLevel.is_present()) { return new LoggerWrapper(_pyLogger, _pythonLogLevel.value(), fallbackLogger); @@ -206,112 +190,128 @@ static ClientConfiguration& ClientConfiguration_setLogger(ClientConfiguration& c return conf; } - void export_config() { using namespace boost::python; class_("ClientConfiguration") - .def("authentication", &ClientConfiguration_setAuthentication, return_self<>()) - .def("operation_timeout_seconds", &ClientConfiguration::getOperationTimeoutSeconds) - .def("operation_timeout_seconds", &ClientConfiguration::setOperationTimeoutSeconds, return_self<>()) - .def("connection_timeout", &ClientConfiguration::getConnectionTimeout) - .def("connection_timeout", &ClientConfiguration::setConnectionTimeout, return_self<>()) - .def("io_threads", &ClientConfiguration::getIOThreads) - .def("io_threads", &ClientConfiguration::setIOThreads, return_self<>()) - .def("message_listener_threads", &ClientConfiguration::getMessageListenerThreads) - .def("message_listener_threads", &ClientConfiguration::setMessageListenerThreads, return_self<>()) - .def("concurrent_lookup_requests", &ClientConfiguration::getConcurrentLookupRequest) - .def("concurrent_lookup_requests", &ClientConfiguration::setConcurrentLookupRequest, return_self<>()) - .def("log_conf_file_path", &ClientConfiguration::getLogConfFilePath, return_value_policy()) - .def("log_conf_file_path", &ClientConfiguration::setLogConfFilePath, return_self<>()) - 
.def("use_tls", &ClientConfiguration::isUseTls) - .def("use_tls", &ClientConfiguration::setUseTls, return_self<>()) - .def("tls_trust_certs_file_path", &ClientConfiguration::getTlsTrustCertsFilePath, return_value_policy()) - .def("tls_trust_certs_file_path", &ClientConfiguration::setTlsTrustCertsFilePath, return_self<>()) - .def("tls_allow_insecure_connection", &ClientConfiguration::isTlsAllowInsecureConnection) - .def("tls_allow_insecure_connection", &ClientConfiguration::setTlsAllowInsecureConnection, return_self<>()) - .def("tls_validate_hostname", &ClientConfiguration::setValidateHostName, return_self<>()) - .def("set_logger", &ClientConfiguration_setLogger, return_self<>()) - ; + .def("authentication", &ClientConfiguration_setAuthentication, return_self<>()) + .def("operation_timeout_seconds", &ClientConfiguration::getOperationTimeoutSeconds) + .def("operation_timeout_seconds", &ClientConfiguration::setOperationTimeoutSeconds, return_self<>()) + .def("connection_timeout", &ClientConfiguration::getConnectionTimeout) + .def("connection_timeout", &ClientConfiguration::setConnectionTimeout, return_self<>()) + .def("io_threads", &ClientConfiguration::getIOThreads) + .def("io_threads", &ClientConfiguration::setIOThreads, return_self<>()) + .def("message_listener_threads", &ClientConfiguration::getMessageListenerThreads) + .def("message_listener_threads", &ClientConfiguration::setMessageListenerThreads, return_self<>()) + .def("concurrent_lookup_requests", &ClientConfiguration::getConcurrentLookupRequest) + .def("concurrent_lookup_requests", &ClientConfiguration::setConcurrentLookupRequest, return_self<>()) + .def("log_conf_file_path", &ClientConfiguration::getLogConfFilePath, + return_value_policy()) + .def("log_conf_file_path", &ClientConfiguration::setLogConfFilePath, return_self<>()) + .def("use_tls", &ClientConfiguration::isUseTls) + .def("use_tls", &ClientConfiguration::setUseTls, return_self<>()) + .def("tls_trust_certs_file_path", 
&ClientConfiguration::getTlsTrustCertsFilePath, + return_value_policy()) + .def("tls_trust_certs_file_path", &ClientConfiguration::setTlsTrustCertsFilePath, return_self<>()) + .def("tls_allow_insecure_connection", &ClientConfiguration::isTlsAllowInsecureConnection) + .def("tls_allow_insecure_connection", &ClientConfiguration::setTlsAllowInsecureConnection, + return_self<>()) + .def("tls_validate_hostname", &ClientConfiguration::setValidateHostName, return_self<>()) + .def("set_logger", &ClientConfiguration_setLogger, return_self<>()); class_("ProducerConfiguration") - .def("producer_name", &ProducerConfiguration::getProducerName, return_value_policy()) - .def("producer_name", &ProducerConfiguration::setProducerName, return_self<>()) - .def("schema", &ProducerConfiguration::getSchema, return_value_policy()) - .def("schema", &ProducerConfiguration::setSchema, return_self<>()) - .def("send_timeout_millis", &ProducerConfiguration::getSendTimeout) - .def("send_timeout_millis", &ProducerConfiguration::setSendTimeout, return_self<>()) - .def("initial_sequence_id", &ProducerConfiguration::getInitialSequenceId) - .def("initial_sequence_id", &ProducerConfiguration::setInitialSequenceId, return_self<>()) - .def("compression_type", &ProducerConfiguration::getCompressionType) - .def("compression_type", &ProducerConfiguration::setCompressionType, return_self<>()) - .def("max_pending_messages", &ProducerConfiguration::getMaxPendingMessages) - .def("max_pending_messages", &ProducerConfiguration::setMaxPendingMessages, return_self<>()) - .def("max_pending_messages_across_partitions", &ProducerConfiguration::getMaxPendingMessagesAcrossPartitions) - .def("max_pending_messages_across_partitions", &ProducerConfiguration::setMaxPendingMessagesAcrossPartitions, return_self<>()) - .def("block_if_queue_full", &ProducerConfiguration::getBlockIfQueueFull) - .def("block_if_queue_full", &ProducerConfiguration::setBlockIfQueueFull, return_self<>()) - .def("partitions_routing_mode", 
&ProducerConfiguration::getPartitionsRoutingMode) - .def("partitions_routing_mode", &ProducerConfiguration::setPartitionsRoutingMode, return_self<>()) - .def("lazy_start_partitioned_producers", &ProducerConfiguration::getLazyStartPartitionedProducers) - .def("lazy_start_partitioned_producers", &ProducerConfiguration::setLazyStartPartitionedProducers, return_self<>()) - .def("batching_enabled", &ProducerConfiguration::getBatchingEnabled, return_value_policy()) - .def("batching_enabled", &ProducerConfiguration::setBatchingEnabled, return_self<>()) - .def("batching_max_messages", &ProducerConfiguration::getBatchingMaxMessages, return_value_policy()) - .def("batching_max_messages", &ProducerConfiguration::setBatchingMaxMessages, return_self<>()) - .def("batching_max_allowed_size_in_bytes", &ProducerConfiguration::getBatchingMaxAllowedSizeInBytes, return_value_policy()) - .def("batching_max_allowed_size_in_bytes", &ProducerConfiguration::setBatchingMaxAllowedSizeInBytes, return_self<>()) - .def("batching_max_publish_delay_ms", &ProducerConfiguration::getBatchingMaxPublishDelayMs, return_value_policy()) - .def("batching_max_publish_delay_ms", &ProducerConfiguration::setBatchingMaxPublishDelayMs, return_self<>()) - .def("property", &ProducerConfiguration::setProperty, return_self<>()) - .def("batching_type", &ProducerConfiguration::setBatchingType, return_self<>()) - .def("batching_type", &ProducerConfiguration::getBatchingType) - .def("encryption_key", &ProducerConfiguration::addEncryptionKey, return_self<>()) - .def("crypto_key_reader", &ProducerConfiguration_setCryptoKeyReader, return_self<>()) - ; + .def("producer_name", &ProducerConfiguration::getProducerName, + return_value_policy()) + .def("producer_name", &ProducerConfiguration::setProducerName, return_self<>()) + .def("schema", &ProducerConfiguration::getSchema, return_value_policy()) + .def("schema", &ProducerConfiguration::setSchema, return_self<>()) + .def("send_timeout_millis", 
&ProducerConfiguration::getSendTimeout) + .def("send_timeout_millis", &ProducerConfiguration::setSendTimeout, return_self<>()) + .def("initial_sequence_id", &ProducerConfiguration::getInitialSequenceId) + .def("initial_sequence_id", &ProducerConfiguration::setInitialSequenceId, return_self<>()) + .def("compression_type", &ProducerConfiguration::getCompressionType) + .def("compression_type", &ProducerConfiguration::setCompressionType, return_self<>()) + .def("max_pending_messages", &ProducerConfiguration::getMaxPendingMessages) + .def("max_pending_messages", &ProducerConfiguration::setMaxPendingMessages, return_self<>()) + .def("max_pending_messages_across_partitions", + &ProducerConfiguration::getMaxPendingMessagesAcrossPartitions) + .def("max_pending_messages_across_partitions", + &ProducerConfiguration::setMaxPendingMessagesAcrossPartitions, return_self<>()) + .def("block_if_queue_full", &ProducerConfiguration::getBlockIfQueueFull) + .def("block_if_queue_full", &ProducerConfiguration::setBlockIfQueueFull, return_self<>()) + .def("partitions_routing_mode", &ProducerConfiguration::getPartitionsRoutingMode) + .def("partitions_routing_mode", &ProducerConfiguration::setPartitionsRoutingMode, return_self<>()) + .def("lazy_start_partitioned_producers", &ProducerConfiguration::getLazyStartPartitionedProducers) + .def("lazy_start_partitioned_producers", &ProducerConfiguration::setLazyStartPartitionedProducers, + return_self<>()) + .def("batching_enabled", &ProducerConfiguration::getBatchingEnabled, + return_value_policy()) + .def("batching_enabled", &ProducerConfiguration::setBatchingEnabled, return_self<>()) + .def("batching_max_messages", &ProducerConfiguration::getBatchingMaxMessages, + return_value_policy()) + .def("batching_max_messages", &ProducerConfiguration::setBatchingMaxMessages, return_self<>()) + .def("batching_max_allowed_size_in_bytes", &ProducerConfiguration::getBatchingMaxAllowedSizeInBytes, + return_value_policy()) + 
.def("batching_max_allowed_size_in_bytes", &ProducerConfiguration::setBatchingMaxAllowedSizeInBytes, + return_self<>()) + .def("batching_max_publish_delay_ms", &ProducerConfiguration::getBatchingMaxPublishDelayMs, + return_value_policy()) + .def("batching_max_publish_delay_ms", &ProducerConfiguration::setBatchingMaxPublishDelayMs, + return_self<>()) + .def("property", &ProducerConfiguration::setProperty, return_self<>()) + .def("batching_type", &ProducerConfiguration::setBatchingType, return_self<>()) + .def("batching_type", &ProducerConfiguration::getBatchingType) + .def("encryption_key", &ProducerConfiguration::addEncryptionKey, return_self<>()) + .def("crypto_key_reader", &ProducerConfiguration_setCryptoKeyReader, return_self<>()); class_("ConsumerConfiguration") - .def("consumer_type", &ConsumerConfiguration::getConsumerType) - .def("consumer_type", &ConsumerConfiguration::setConsumerType, return_self<>()) - .def("schema", &ConsumerConfiguration::getSchema, return_value_policy()) - .def("schema", &ConsumerConfiguration::setSchema, return_self<>()) - .def("message_listener", &ConsumerConfiguration_setMessageListener, return_self<>()) - .def("receiver_queue_size", &ConsumerConfiguration::getReceiverQueueSize) - .def("receiver_queue_size", &ConsumerConfiguration::setReceiverQueueSize) - .def("max_total_receiver_queue_size_across_partitions", &ConsumerConfiguration::getMaxTotalReceiverQueueSizeAcrossPartitions) - .def("max_total_receiver_queue_size_across_partitions", &ConsumerConfiguration::setMaxTotalReceiverQueueSizeAcrossPartitions) - .def("consumer_name", &ConsumerConfiguration::getConsumerName, return_value_policy()) - .def("consumer_name", &ConsumerConfiguration::setConsumerName) - .def("unacked_messages_timeout_ms", &ConsumerConfiguration::getUnAckedMessagesTimeoutMs) - .def("unacked_messages_timeout_ms", &ConsumerConfiguration::setUnAckedMessagesTimeoutMs) - .def("negative_ack_redelivery_delay_ms", &ConsumerConfiguration::getNegativeAckRedeliveryDelayMs) - 
.def("negative_ack_redelivery_delay_ms", &ConsumerConfiguration::setNegativeAckRedeliveryDelayMs) - .def("broker_consumer_stats_cache_time_ms", &ConsumerConfiguration::getBrokerConsumerStatsCacheTimeInMs) - .def("broker_consumer_stats_cache_time_ms", &ConsumerConfiguration::setBrokerConsumerStatsCacheTimeInMs) - .def("pattern_auto_discovery_period", &ConsumerConfiguration::getPatternAutoDiscoveryPeriod) - .def("pattern_auto_discovery_period", &ConsumerConfiguration::setPatternAutoDiscoveryPeriod) - .def("read_compacted", &ConsumerConfiguration::isReadCompacted) - .def("read_compacted", &ConsumerConfiguration::setReadCompacted) - .def("property", &ConsumerConfiguration::setProperty, return_self<>()) - .def("subscription_initial_position", &ConsumerConfiguration::getSubscriptionInitialPosition) - .def("subscription_initial_position", &ConsumerConfiguration::setSubscriptionInitialPosition) - .def("crypto_key_reader", &ConsumerConfiguration_setCryptoKeyReader, return_self<>()) - .def("replicate_subscription_state_enabled", &ConsumerConfiguration::setReplicateSubscriptionStateEnabled) - .def("replicate_subscription_state_enabled", &ConsumerConfiguration::isReplicateSubscriptionStateEnabled) - ; + .def("consumer_type", &ConsumerConfiguration::getConsumerType) + .def("consumer_type", &ConsumerConfiguration::setConsumerType, return_self<>()) + .def("schema", &ConsumerConfiguration::getSchema, return_value_policy()) + .def("schema", &ConsumerConfiguration::setSchema, return_self<>()) + .def("message_listener", &ConsumerConfiguration_setMessageListener, return_self<>()) + .def("receiver_queue_size", &ConsumerConfiguration::getReceiverQueueSize) + .def("receiver_queue_size", &ConsumerConfiguration::setReceiverQueueSize) + .def("max_total_receiver_queue_size_across_partitions", + &ConsumerConfiguration::getMaxTotalReceiverQueueSizeAcrossPartitions) + .def("max_total_receiver_queue_size_across_partitions", + &ConsumerConfiguration::setMaxTotalReceiverQueueSizeAcrossPartitions) 
+ .def("consumer_name", &ConsumerConfiguration::getConsumerName, + return_value_policy()) + .def("consumer_name", &ConsumerConfiguration::setConsumerName) + .def("unacked_messages_timeout_ms", &ConsumerConfiguration::getUnAckedMessagesTimeoutMs) + .def("unacked_messages_timeout_ms", &ConsumerConfiguration::setUnAckedMessagesTimeoutMs) + .def("negative_ack_redelivery_delay_ms", &ConsumerConfiguration::getNegativeAckRedeliveryDelayMs) + .def("negative_ack_redelivery_delay_ms", &ConsumerConfiguration::setNegativeAckRedeliveryDelayMs) + .def("broker_consumer_stats_cache_time_ms", + &ConsumerConfiguration::getBrokerConsumerStatsCacheTimeInMs) + .def("broker_consumer_stats_cache_time_ms", + &ConsumerConfiguration::setBrokerConsumerStatsCacheTimeInMs) + .def("pattern_auto_discovery_period", &ConsumerConfiguration::getPatternAutoDiscoveryPeriod) + .def("pattern_auto_discovery_period", &ConsumerConfiguration::setPatternAutoDiscoveryPeriod) + .def("read_compacted", &ConsumerConfiguration::isReadCompacted) + .def("read_compacted", &ConsumerConfiguration::setReadCompacted) + .def("property", &ConsumerConfiguration::setProperty, return_self<>()) + .def("subscription_initial_position", &ConsumerConfiguration::getSubscriptionInitialPosition) + .def("subscription_initial_position", &ConsumerConfiguration::setSubscriptionInitialPosition) + .def("crypto_key_reader", &ConsumerConfiguration_setCryptoKeyReader, return_self<>()) + .def("replicate_subscription_state_enabled", + &ConsumerConfiguration::setReplicateSubscriptionStateEnabled) + .def("replicate_subscription_state_enabled", + &ConsumerConfiguration::isReplicateSubscriptionStateEnabled); class_("ReaderConfiguration") - .def("reader_listener", &ReaderConfiguration_setReaderListener, return_self<>()) - .def("schema", &ReaderConfiguration::getSchema, return_value_policy()) - .def("schema", &ReaderConfiguration::setSchema, return_self<>()) - .def("receiver_queue_size", &ReaderConfiguration::getReceiverQueueSize) - 
.def("receiver_queue_size", &ReaderConfiguration::setReceiverQueueSize) - .def("reader_name", &ReaderConfiguration::getReaderName, return_value_policy()) - .def("reader_name", &ReaderConfiguration::setReaderName) - .def("subscription_role_prefix", &ReaderConfiguration::getSubscriptionRolePrefix, return_value_policy()) - .def("subscription_role_prefix", &ReaderConfiguration::setSubscriptionRolePrefix) - .def("read_compacted", &ReaderConfiguration::isReadCompacted) - .def("read_compacted", &ReaderConfiguration::setReadCompacted) - .def("crypto_key_reader", &ReaderConfiguration_setCryptoKeyReader, return_self<>()) - ; + .def("reader_listener", &ReaderConfiguration_setReaderListener, return_self<>()) + .def("schema", &ReaderConfiguration::getSchema, return_value_policy()) + .def("schema", &ReaderConfiguration::setSchema, return_self<>()) + .def("receiver_queue_size", &ReaderConfiguration::getReceiverQueueSize) + .def("receiver_queue_size", &ReaderConfiguration::setReceiverQueueSize) + .def("reader_name", &ReaderConfiguration::getReaderName, return_value_policy()) + .def("reader_name", &ReaderConfiguration::setReaderName) + .def("subscription_role_prefix", &ReaderConfiguration::getSubscriptionRolePrefix, + return_value_policy()) + .def("subscription_role_prefix", &ReaderConfiguration::setSubscriptionRolePrefix) + .def("read_compacted", &ReaderConfiguration::isReadCompacted) + .def("read_compacted", &ReaderConfiguration::setReadCompacted) + .def("crypto_key_reader", &ReaderConfiguration_setCryptoKeyReader, return_self<>()); } diff --git a/pulsar-client-cpp/python/src/consumer.cc b/pulsar-client-cpp/python/src/consumer.cc index 815282d8876ca..28bedad99482a 100644 --- a/pulsar-client-cpp/python/src/consumer.cc +++ b/pulsar-client-cpp/python/src/consumer.cc @@ -20,11 +20,10 @@ void Consumer_unsubscribe(Consumer& consumer) { Result res; - Py_BEGIN_ALLOW_THREADS - res = consumer.unsubscribe(); + Py_BEGIN_ALLOW_THREADS res = consumer.unsubscribe(); Py_END_ALLOW_THREADS - 
CHECK_RESULT(res); + CHECK_RESULT(res); } Message Consumer_receive(Consumer& consumer) { @@ -32,11 +31,10 @@ Message Consumer_receive(Consumer& consumer) { Result res; while (true) { - Py_BEGIN_ALLOW_THREADS - res = consumer.receive(msg); + Py_BEGIN_ALLOW_THREADS res = consumer.receive(msg); Py_END_ALLOW_THREADS - if (res != ResultTimeout) { + if (res != ResultTimeout) { // In case of timeout we keep calling receive() to simulate a // blocking call until a message is available, while breaking // every once in a while to check the Python signal status @@ -56,17 +54,14 @@ Message Consumer_receive(Consumer& consumer) { Message Consumer_receive_timeout(Consumer& consumer, int timeoutMs) { Message msg; Result res; - Py_BEGIN_ALLOW_THREADS - res = consumer.receive(msg, timeoutMs); + Py_BEGIN_ALLOW_THREADS res = consumer.receive(msg, timeoutMs); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); return msg; } -void Consumer_acknowledge(Consumer& consumer, const Message& msg) { - consumer.acknowledgeAsync(msg, nullptr); -} +void Consumer_acknowledge(Consumer& consumer, const Message& msg) { consumer.acknowledgeAsync(msg, nullptr); } void Consumer_acknowledge_message_id(Consumer& consumer, const MessageId& msgId) { consumer.acknowledgeAsync(msgId, nullptr); @@ -77,7 +72,7 @@ void Consumer_negative_acknowledge(Consumer& consumer, const Message& msg) { } void Consumer_negative_acknowledge_message_id(Consumer& consumer, const MessageId& msgId) { - consumer.negativeAcknowledge(msgId); + consumer.negativeAcknowledge(msgId); } void Consumer_acknowledge_cumulative(Consumer& consumer, const Message& msg) { @@ -90,60 +85,52 @@ void Consumer_acknowledge_cumulative_message_id(Consumer& consumer, const Messag void Consumer_close(Consumer& consumer) { Result res; - Py_BEGIN_ALLOW_THREADS - res = consumer.close(); + Py_BEGIN_ALLOW_THREADS res = consumer.close(); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); } -void Consumer_pauseMessageListener(Consumer& 
consumer) { - CHECK_RESULT(consumer.pauseMessageListener()); -} +void Consumer_pauseMessageListener(Consumer& consumer) { CHECK_RESULT(consumer.pauseMessageListener()); } -void Consumer_resumeMessageListener(Consumer& consumer) { - CHECK_RESULT(consumer.resumeMessageListener()); -} +void Consumer_resumeMessageListener(Consumer& consumer) { CHECK_RESULT(consumer.resumeMessageListener()); } void Consumer_seek(Consumer& consumer, const MessageId& msgId) { Result res; - Py_BEGIN_ALLOW_THREADS - res = consumer.seek(msgId); + Py_BEGIN_ALLOW_THREADS res = consumer.seek(msgId); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); } void Consumer_seek_timestamp(Consumer& consumer, uint64_t timestamp) { Result res; - Py_BEGIN_ALLOW_THREADS - res = consumer.seek(timestamp); + Py_BEGIN_ALLOW_THREADS res = consumer.seek(timestamp); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); } void export_consumer() { using namespace boost::python; class_("Consumer", no_init) - .def("topic", &Consumer::getTopic, "return the topic this consumer is subscribed to", - return_value_policy()) - .def("subscription_name", &Consumer::getSubscriptionName, return_value_policy()) - .def("unsubscribe", &Consumer_unsubscribe) - .def("receive", &Consumer_receive) - .def("receive", &Consumer_receive_timeout) - .def("acknowledge", &Consumer_acknowledge) - .def("acknowledge", &Consumer_acknowledge_message_id) - .def("acknowledge_cumulative", &Consumer_acknowledge_cumulative) - .def("acknowledge_cumulative", &Consumer_acknowledge_cumulative_message_id) - .def("negative_acknowledge", &Consumer_negative_acknowledge) - .def("negative_acknowledge", &Consumer_negative_acknowledge_message_id) - .def("close", &Consumer_close) - .def("pause_message_listener", &Consumer_pauseMessageListener) - .def("resume_message_listener", &Consumer_resumeMessageListener) - .def("redeliver_unacknowledged_messages", &Consumer::redeliverUnacknowledgedMessages) - .def("seek", &Consumer_seek) - .def("seek", 
&Consumer_seek_timestamp) - ; + .def("topic", &Consumer::getTopic, "return the topic this consumer is subscribed to", + return_value_policy()) + .def("subscription_name", &Consumer::getSubscriptionName, return_value_policy()) + .def("unsubscribe", &Consumer_unsubscribe) + .def("receive", &Consumer_receive) + .def("receive", &Consumer_receive_timeout) + .def("acknowledge", &Consumer_acknowledge) + .def("acknowledge", &Consumer_acknowledge_message_id) + .def("acknowledge_cumulative", &Consumer_acknowledge_cumulative) + .def("acknowledge_cumulative", &Consumer_acknowledge_cumulative_message_id) + .def("negative_acknowledge", &Consumer_negative_acknowledge) + .def("negative_acknowledge", &Consumer_negative_acknowledge_message_id) + .def("close", &Consumer_close) + .def("pause_message_listener", &Consumer_pauseMessageListener) + .def("resume_message_listener", &Consumer_resumeMessageListener) + .def("redeliver_unacknowledged_messages", &Consumer::redeliverUnacknowledgedMessages) + .def("seek", &Consumer_seek) + .def("seek", &Consumer_seek_timestamp); } diff --git a/pulsar-client-cpp/python/src/cryptoKeyReader.cc b/pulsar-client-cpp/python/src/cryptoKeyReader.cc index ccefe6f18b970..2c46b6fb5af19 100644 --- a/pulsar-client-cpp/python/src/cryptoKeyReader.cc +++ b/pulsar-client-cpp/python/src/cryptoKeyReader.cc @@ -21,7 +21,7 @@ CryptoKeyReaderWrapper::CryptoKeyReaderWrapper() {} CryptoKeyReaderWrapper::CryptoKeyReaderWrapper(const std::string& publicKeyPath, - const std::string& privateKeyPath) { + const std::string& privateKeyPath) { this->cryptoKeyReader = DefaultCryptoKeyReader::create(publicKeyPath, privateKeyPath); } diff --git a/pulsar-client-cpp/python/src/enums.cc b/pulsar-client-cpp/python/src/enums.cc index c23b211ffdc15..1b21af585ed54 100644 --- a/pulsar-client-cpp/python/src/enums.cc +++ b/pulsar-client-cpp/python/src/enums.cc @@ -18,104 +18,96 @@ */ #include "utils.h" - void export_enums() { using namespace boost::python; enum_("PartitionsRoutingMode") - 
.value("UseSinglePartition", ProducerConfiguration::UseSinglePartition) - .value("RoundRobinDistribution", ProducerConfiguration::RoundRobinDistribution) - .value("CustomPartition", ProducerConfiguration::CustomPartition) - ; + .value("UseSinglePartition", ProducerConfiguration::UseSinglePartition) + .value("RoundRobinDistribution", ProducerConfiguration::RoundRobinDistribution) + .value("CustomPartition", ProducerConfiguration::CustomPartition); enum_("CompressionType") - .value("NONE", CompressionNone) // Don't use 'None' since it's a keyword in py3 - .value("LZ4", CompressionLZ4) - .value("ZLib", CompressionZLib) - .value("ZSTD", CompressionZSTD) - .value("SNAPPY", CompressionSNAPPY) - ; + .value("NONE", CompressionNone) // Don't use 'None' since it's a keyword in py3 + .value("LZ4", CompressionLZ4) + .value("ZLib", CompressionZLib) + .value("ZSTD", CompressionZSTD) + .value("SNAPPY", CompressionSNAPPY); enum_("ConsumerType") - .value("Exclusive", ConsumerExclusive) - .value("Shared", ConsumerShared) - .value("Failover", ConsumerFailover) - .value("KeyShared", ConsumerKeyShared) - ; + .value("Exclusive", ConsumerExclusive) + .value("Shared", ConsumerShared) + .value("Failover", ConsumerFailover) + .value("KeyShared", ConsumerKeyShared); - enum_("Result", "Collection of return codes") - .value("Ok", ResultOk) - .value("UnknownError", ResultUnknownError) - .value("InvalidConfiguration", ResultInvalidConfiguration) - .value("Timeout", ResultTimeout) - .value("LookupError", ResultLookupError) - .value("ConnectError", ResultConnectError) - .value("ReadError", ResultReadError) - .value("AuthenticationError", ResultAuthenticationError) - .value("AuthorizationError", ResultAuthorizationError) - .value("ErrorGettingAuthenticationData", ResultErrorGettingAuthenticationData) - .value("BrokerMetadataError", ResultBrokerMetadataError) - .value("BrokerPersistenceError", ResultBrokerPersistenceError) - .value("ChecksumError", ResultChecksumError) - .value("ConsumerBusy", 
ResultConsumerBusy) - .value("NotConnected", ResultNotConnected) - .value("AlreadyClosed", ResultAlreadyClosed) - .value("InvalidMessage", ResultInvalidMessage) - .value("ConsumerNotInitialized", ResultConsumerNotInitialized) - .value("ProducerNotInitialized", ResultProducerNotInitialized) - .value("ProducerBusy", ResultProducerBusy) - .value("TooManyLookupRequestException", ResultTooManyLookupRequestException) - .value("InvalidTopicName", ResultInvalidTopicName) - .value("InvalidUrl", ResultInvalidUrl) - .value("ServiceUnitNotReady", ResultServiceUnitNotReady) - .value("OperationNotSupported", ResultOperationNotSupported) - .value("ProducerBlockedQuotaExceededError", ResultProducerBlockedQuotaExceededError) - .value("ProducerBlockedQuotaExceededException", ResultProducerBlockedQuotaExceededException) - .value("ProducerQueueIsFull", ResultProducerQueueIsFull) - .value("MessageTooBig", ResultMessageTooBig) - .value("TopicNotFound", ResultTopicNotFound) - .value("SubscriptionNotFound", ResultSubscriptionNotFound) - .value("ConsumerNotFound", ResultConsumerNotFound) - .value("UnsupportedVersionError", ResultUnsupportedVersionError) - .value("TopicTerminated", ResultTopicTerminated) - .value("CryptoError", ResultCryptoError) - .value("IncompatibleSchema", ResultIncompatibleSchema) - .value("ConsumerAssignError", ResultConsumerAssignError) - .value("CumulativeAcknowledgementNotAllowedError", ResultCumulativeAcknowledgementNotAllowedError) - .value("TransactionCoordinatorNotFoundError", ResultTransactionCoordinatorNotFoundError) - .value("InvalidTxnStatusError", ResultInvalidTxnStatusError) - .value("NotAllowedError", ResultNotAllowedError) - .value("TransactionConflict", ResultTransactionConflict) - .value("TransactionNotFound", ResultTransactionNotFound) - .value("ProducerFenced", ResultProducerFenced) - .value("MemoryBufferIsFull", ResultMemoryBufferIsFull) - ; + enum_("Result", "Collection of return codes") + .value("Ok", ResultOk) + .value("UnknownError", 
ResultUnknownError) + .value("InvalidConfiguration", ResultInvalidConfiguration) + .value("Timeout", ResultTimeout) + .value("LookupError", ResultLookupError) + .value("ConnectError", ResultConnectError) + .value("ReadError", ResultReadError) + .value("AuthenticationError", ResultAuthenticationError) + .value("AuthorizationError", ResultAuthorizationError) + .value("ErrorGettingAuthenticationData", ResultErrorGettingAuthenticationData) + .value("BrokerMetadataError", ResultBrokerMetadataError) + .value("BrokerPersistenceError", ResultBrokerPersistenceError) + .value("ChecksumError", ResultChecksumError) + .value("ConsumerBusy", ResultConsumerBusy) + .value("NotConnected", ResultNotConnected) + .value("AlreadyClosed", ResultAlreadyClosed) + .value("InvalidMessage", ResultInvalidMessage) + .value("ConsumerNotInitialized", ResultConsumerNotInitialized) + .value("ProducerNotInitialized", ResultProducerNotInitialized) + .value("ProducerBusy", ResultProducerBusy) + .value("TooManyLookupRequestException", ResultTooManyLookupRequestException) + .value("InvalidTopicName", ResultInvalidTopicName) + .value("InvalidUrl", ResultInvalidUrl) + .value("ServiceUnitNotReady", ResultServiceUnitNotReady) + .value("OperationNotSupported", ResultOperationNotSupported) + .value("ProducerBlockedQuotaExceededError", ResultProducerBlockedQuotaExceededError) + .value("ProducerBlockedQuotaExceededException", ResultProducerBlockedQuotaExceededException) + .value("ProducerQueueIsFull", ResultProducerQueueIsFull) + .value("MessageTooBig", ResultMessageTooBig) + .value("TopicNotFound", ResultTopicNotFound) + .value("SubscriptionNotFound", ResultSubscriptionNotFound) + .value("ConsumerNotFound", ResultConsumerNotFound) + .value("UnsupportedVersionError", ResultUnsupportedVersionError) + .value("TopicTerminated", ResultTopicTerminated) + .value("CryptoError", ResultCryptoError) + .value("IncompatibleSchema", ResultIncompatibleSchema) + .value("ConsumerAssignError", ResultConsumerAssignError) + 
.value("CumulativeAcknowledgementNotAllowedError", ResultCumulativeAcknowledgementNotAllowedError) + .value("TransactionCoordinatorNotFoundError", ResultTransactionCoordinatorNotFoundError) + .value("InvalidTxnStatusError", ResultInvalidTxnStatusError) + .value("NotAllowedError", ResultNotAllowedError) + .value("TransactionConflict", ResultTransactionConflict) + .value("TransactionNotFound", ResultTransactionNotFound) + .value("ProducerFenced", ResultProducerFenced) + .value("MemoryBufferIsFull", ResultMemoryBufferIsFull); enum_("SchemaType", "Supported schema types") - .value("NONE", pulsar::NONE) - .value("STRING", pulsar::STRING) - .value("INT8", pulsar::INT8) - .value("INT16", pulsar::INT16) - .value("INT32", pulsar::INT32) - .value("INT64", pulsar::INT64) - .value("FLOAT", pulsar::FLOAT) - .value("DOUBLE", pulsar::DOUBLE) - .value("BYTES", pulsar::BYTES) - .value("JSON", pulsar::JSON) - .value("PROTOBUF", pulsar::PROTOBUF) - .value("AVRO", pulsar::AVRO) - .value("AUTO_CONSUME", pulsar::AUTO_CONSUME) - .value("AUTO_PUBLISH", pulsar::AUTO_PUBLISH) - .value("KEY_VALUE", pulsar::KEY_VALUE) - ; + .value("NONE", pulsar::NONE) + .value("STRING", pulsar::STRING) + .value("INT8", pulsar::INT8) + .value("INT16", pulsar::INT16) + .value("INT32", pulsar::INT32) + .value("INT64", pulsar::INT64) + .value("FLOAT", pulsar::FLOAT) + .value("DOUBLE", pulsar::DOUBLE) + .value("BYTES", pulsar::BYTES) + .value("JSON", pulsar::JSON) + .value("PROTOBUF", pulsar::PROTOBUF) + .value("AVRO", pulsar::AVRO) + .value("AUTO_CONSUME", pulsar::AUTO_CONSUME) + .value("AUTO_PUBLISH", pulsar::AUTO_PUBLISH) + .value("KEY_VALUE", pulsar::KEY_VALUE); enum_("InitialPosition", "Supported initial position") - .value("Latest", InitialPositionLatest) - .value("Earliest", InitialPositionEarliest) - ; + .value("Latest", InitialPositionLatest) + .value("Earliest", InitialPositionEarliest); enum_("BatchingType", "Supported batching types") - .value("Default", ProducerConfiguration::DefaultBatching) - 
.value("KeyBased", ProducerConfiguration::KeyBasedBatching) - ; + .value("Default", ProducerConfiguration::DefaultBatching) + .value("KeyBased", ProducerConfiguration::KeyBasedBatching); } diff --git a/pulsar-client-cpp/python/src/exceptions.cc b/pulsar-client-cpp/python/src/exceptions.cc index c39b52d737198..25b3bd07e1cb1 100644 --- a/pulsar-client-cpp/python/src/exceptions.cc +++ b/pulsar-client-cpp/python/src/exceptions.cc @@ -29,16 +29,13 @@ PyObject* createExceptionClass(const char* name, PyObject* baseTypeObj = PyExc_E std::string fullName = "_pulsar."; fullName += name; - PyObject* typeObj = PyErr_NewException(const_cast(fullName.c_str()), - baseTypeObj, nullptr); + PyObject* typeObj = PyErr_NewException(const_cast(fullName.c_str()), baseTypeObj, nullptr); if (!typeObj) throw_error_already_set(); scope().attr(name) = handle<>(borrowed(typeObj)); return typeObj; } -PyObject* get_exception_class(Result result) { - return exceptions[result]; -} +PyObject* get_exception_class(Result result) { return exceptions[result]; } void export_exceptions() { using namespace boost::python; @@ -46,44 +43,58 @@ void export_exceptions() { basePulsarException = createExceptionClass("PulsarException"); exceptions[ResultUnknownError] = createExceptionClass("UnknownError", basePulsarException); - exceptions[ResultInvalidConfiguration] = createExceptionClass("InvalidConfiguration", basePulsarException); + exceptions[ResultInvalidConfiguration] = + createExceptionClass("InvalidConfiguration", basePulsarException); exceptions[ResultTimeout] = createExceptionClass("Timeout", basePulsarException); exceptions[ResultLookupError] = createExceptionClass("LookupError", basePulsarException); exceptions[ResultConnectError] = createExceptionClass("ConnectError", basePulsarException); exceptions[ResultReadError] = createExceptionClass("ReadError", basePulsarException); exceptions[ResultAuthenticationError] = createExceptionClass("AuthenticationError", basePulsarException); 
exceptions[ResultAuthorizationError] = createExceptionClass("AuthorizationError", basePulsarException); - exceptions[ResultErrorGettingAuthenticationData] = createExceptionClass("ErrorGettingAuthenticationData", basePulsarException); + exceptions[ResultErrorGettingAuthenticationData] = + createExceptionClass("ErrorGettingAuthenticationData", basePulsarException); exceptions[ResultBrokerMetadataError] = createExceptionClass("BrokerMetadataError", basePulsarException); - exceptions[ResultBrokerPersistenceError] = createExceptionClass("BrokerPersistenceError", basePulsarException); + exceptions[ResultBrokerPersistenceError] = + createExceptionClass("BrokerPersistenceError", basePulsarException); exceptions[ResultChecksumError] = createExceptionClass("ChecksumError", basePulsarException); exceptions[ResultConsumerBusy] = createExceptionClass("ConsumerBusy", basePulsarException); exceptions[ResultNotConnected] = createExceptionClass("NotConnected", basePulsarException); exceptions[ResultAlreadyClosed] = createExceptionClass("AlreadyClosed", basePulsarException); exceptions[ResultInvalidMessage] = createExceptionClass("InvalidMessage", basePulsarException); - exceptions[ResultConsumerNotInitialized] = createExceptionClass("ConsumerNotInitialized", basePulsarException); - exceptions[ResultProducerNotInitialized] = createExceptionClass("ProducerNotInitialized", basePulsarException); + exceptions[ResultConsumerNotInitialized] = + createExceptionClass("ConsumerNotInitialized", basePulsarException); + exceptions[ResultProducerNotInitialized] = + createExceptionClass("ProducerNotInitialized", basePulsarException); exceptions[ResultProducerBusy] = createExceptionClass("ProducerBusy", basePulsarException); - exceptions[ResultTooManyLookupRequestException] = createExceptionClass("TooManyLookupRequestException", basePulsarException); + exceptions[ResultTooManyLookupRequestException] = + createExceptionClass("TooManyLookupRequestException", basePulsarException); 
exceptions[ResultInvalidTopicName] = createExceptionClass("InvalidTopicName", basePulsarException); exceptions[ResultInvalidUrl] = createExceptionClass("InvalidUrl", basePulsarException); exceptions[ResultServiceUnitNotReady] = createExceptionClass("ServiceUnitNotReady", basePulsarException); - exceptions[ResultOperationNotSupported] = createExceptionClass("OperationNotSupported", basePulsarException); - exceptions[ResultProducerBlockedQuotaExceededError] = createExceptionClass("ProducerBlockedQuotaExceededError", basePulsarException); - exceptions[ResultProducerBlockedQuotaExceededException] = createExceptionClass("ProducerBlockedQuotaExceededException", basePulsarException); + exceptions[ResultOperationNotSupported] = + createExceptionClass("OperationNotSupported", basePulsarException); + exceptions[ResultProducerBlockedQuotaExceededError] = + createExceptionClass("ProducerBlockedQuotaExceededError", basePulsarException); + exceptions[ResultProducerBlockedQuotaExceededException] = + createExceptionClass("ProducerBlockedQuotaExceededException", basePulsarException); exceptions[ResultProducerQueueIsFull] = createExceptionClass("ProducerQueueIsFull", basePulsarException); exceptions[ResultMessageTooBig] = createExceptionClass("MessageTooBig", basePulsarException); exceptions[ResultTopicNotFound] = createExceptionClass("TopicNotFound", basePulsarException); - exceptions[ResultSubscriptionNotFound] = createExceptionClass("SubscriptionNotFound", basePulsarException); + exceptions[ResultSubscriptionNotFound] = + createExceptionClass("SubscriptionNotFound", basePulsarException); exceptions[ResultConsumerNotFound] = createExceptionClass("ConsumerNotFound", basePulsarException); - exceptions[ResultUnsupportedVersionError] = createExceptionClass("UnsupportedVersionError", basePulsarException); + exceptions[ResultUnsupportedVersionError] = + createExceptionClass("UnsupportedVersionError", basePulsarException); exceptions[ResultTopicTerminated] = 
createExceptionClass("TopicTerminated", basePulsarException); exceptions[ResultCryptoError] = createExceptionClass("CryptoError", basePulsarException); exceptions[ResultIncompatibleSchema] = createExceptionClass("IncompatibleSchema", basePulsarException); exceptions[ResultConsumerAssignError] = createExceptionClass("ConsumerAssignError", basePulsarException); - exceptions[ResultCumulativeAcknowledgementNotAllowedError] = createExceptionClass("CumulativeAcknowledgementNotAllowedError", basePulsarException); - exceptions[ResultTransactionCoordinatorNotFoundError] = createExceptionClass("TransactionCoordinatorNotFoundError", basePulsarException); - exceptions[ResultInvalidTxnStatusError] = createExceptionClass("InvalidTxnStatusError", basePulsarException); + exceptions[ResultCumulativeAcknowledgementNotAllowedError] = + createExceptionClass("CumulativeAcknowledgementNotAllowedError", basePulsarException); + exceptions[ResultTransactionCoordinatorNotFoundError] = + createExceptionClass("TransactionCoordinatorNotFoundError", basePulsarException); + exceptions[ResultInvalidTxnStatusError] = + createExceptionClass("InvalidTxnStatusError", basePulsarException); exceptions[ResultNotAllowedError] = createExceptionClass("NotAllowedError", basePulsarException); exceptions[ResultTransactionConflict] = createExceptionClass("TransactionConflict", basePulsarException); exceptions[ResultTransactionNotFound] = createExceptionClass("TransactionNotFound", basePulsarException); diff --git a/pulsar-client-cpp/python/src/message.cc b/pulsar-client-cpp/python/src/message.cc index 8532966648d04..b93380bc7afb9 100644 --- a/pulsar-client-cpp/python/src/message.cc +++ b/pulsar-client-cpp/python/src/message.cc @@ -28,34 +28,23 @@ std::string MessageId_str(const MessageId& msgId) { return ss.str(); } -bool MessageId_eq(const MessageId& a, const MessageId& b) { - return a == b; -} +bool MessageId_eq(const MessageId& a, const MessageId& b) { return a == b; } -bool MessageId_ne(const MessageId& a, 
const MessageId& b) { - return a != b; -} +bool MessageId_ne(const MessageId& a, const MessageId& b) { return a != b; } -bool MessageId_lt(const MessageId& a, const MessageId& b) { - return a < b; -} +bool MessageId_lt(const MessageId& a, const MessageId& b) { return a < b; } -bool MessageId_le(const MessageId& a, const MessageId& b) { - return a <= b; -} +bool MessageId_le(const MessageId& a, const MessageId& b) { return a <= b; } -bool MessageId_gt(const MessageId& a, const MessageId& b) { - return a > b; -} +bool MessageId_gt(const MessageId& a, const MessageId& b) { return a > b; } -bool MessageId_ge(const MessageId& a, const MessageId& b) { - return a >= b; -} +bool MessageId_ge(const MessageId& a, const MessageId& b) { return a >= b; } boost::python::object MessageId_serialize(const MessageId& msgId) { std::string serialized; msgId.serialize(serialized); - return boost::python::object(boost::python::handle<>(PyBytes_FromStringAndSize(serialized.c_str(), serialized.length()))); + return boost::python::object( + boost::python::handle<>(PyBytes_FromStringAndSize(serialized.c_str(), serialized.length()))); } std::string Message_str(const Message& msg) { @@ -65,7 +54,8 @@ std::string Message_str(const Message& msg) { } boost::python::object Message_data(const Message& msg) { - return boost::python::object(boost::python::handle<>(PyBytes_FromStringAndSize((const char*)msg.getData(), msg.getLength()))); + return boost::python::object( + boost::python::handle<>(PyBytes_FromStringAndSize((const char*)msg.getData(), msg.getLength()))); } boost::python::object Message_properties(const Message& msg) { @@ -88,9 +78,7 @@ std::string schema_version_str(const Message& msg) { return ss.str(); } -const MessageId& Message_getMessageId(const Message& msg) { - return msg.getMessageId(); -} +const MessageId& Message_getMessageId(const Message& msg) { return msg.getMessageId(); } void deliverAfter(MessageBuilder* const builder, PyObject* obj_delta) { PyDateTime_Delta const* pydelta 
= reinterpret_cast(obj_delta); @@ -102,12 +90,9 @@ void deliverAfter(MessageBuilder* const builder, PyObject* obj_delta) { } // Create chrono duration object - std::chrono::milliseconds - duration = std::chrono::duration_cast( - std::chrono::hours(24)*days - + std::chrono::seconds(pydelta->seconds) - + std::chrono::microseconds(pydelta->microseconds) - ); + std::chrono::milliseconds duration = std::chrono::duration_cast( + std::chrono::hours(24) * days + std::chrono::seconds(pydelta->seconds) + + std::chrono::microseconds(pydelta->microseconds)); if (is_negative) { duration = duration * -1; @@ -121,70 +106,66 @@ void export_message() { PyDateTime_IMPORT; - MessageBuilder& (MessageBuilder::*MessageBuilderSetContentString)(const std::string&) = &MessageBuilder::setContent; + MessageBuilder& (MessageBuilder::*MessageBuilderSetContentString)(const std::string&) = + &MessageBuilder::setContent; class_("MessageBuilder") - .def("content", MessageBuilderSetContentString, return_self<>()) - .def("property", &MessageBuilder::setProperty, return_self<>()) - .def("properties", &MessageBuilder::setProperties, return_self<>()) - .def("sequence_id", &MessageBuilder::setSequenceId, return_self<>()) - .def("deliver_after", &deliverAfter, return_self<>()) - .def("deliver_at", &MessageBuilder::setDeliverAt, return_self<>()) - .def("partition_key", &MessageBuilder::setPartitionKey, return_self<>()) - .def("event_timestamp", &MessageBuilder::setEventTimestamp, return_self<>()) - .def("replication_clusters", &MessageBuilder::setReplicationClusters, return_self<>()) - .def("disable_replication", &MessageBuilder::disableReplication, return_self<>()) - .def("build", &MessageBuilder::build) - ; - - class_("MessageStringMap") - .def(map_indexing_suite()) - ; + .def("content", MessageBuilderSetContentString, return_self<>()) + .def("property", &MessageBuilder::setProperty, return_self<>()) + .def("properties", &MessageBuilder::setProperties, return_self<>()) + .def("sequence_id", 
&MessageBuilder::setSequenceId, return_self<>()) + .def("deliver_after", &deliverAfter, return_self<>()) + .def("deliver_at", &MessageBuilder::setDeliverAt, return_self<>()) + .def("partition_key", &MessageBuilder::setPartitionKey, return_self<>()) + .def("event_timestamp", &MessageBuilder::setEventTimestamp, return_self<>()) + .def("replication_clusters", &MessageBuilder::setReplicationClusters, return_self<>()) + .def("disable_replication", &MessageBuilder::disableReplication, return_self<>()) + .def("build", &MessageBuilder::build); + + class_("MessageStringMap").def(map_indexing_suite()); static const MessageId& _MessageId_earliest = MessageId::earliest(); static const MessageId& _MessageId_latest = MessageId::latest(); class_("MessageId") - .def(init()) - .def("__str__", &MessageId_str) - .def("__eq__", &MessageId_eq) - .def("__ne__", &MessageId_ne) - .def("__le__", &MessageId_le) - .def("__lt__", &MessageId_lt) - .def("__ge__", &MessageId_ge) - .def("__gt__", &MessageId_gt) - .def("ledger_id", &MessageId::ledgerId) - .def("entry_id", &MessageId::entryId) - .def("batch_index", &MessageId::batchIndex) - .def("partition", &MessageId::partition) - .add_static_property("earliest", make_getter(&_MessageId_earliest)) - .add_static_property("latest", make_getter(&_MessageId_latest)) - .def("serialize", &MessageId_serialize) - .def("deserialize", &MessageId::deserialize).staticmethod("deserialize") - ; + .def(init()) + .def("__str__", &MessageId_str) + .def("__eq__", &MessageId_eq) + .def("__ne__", &MessageId_ne) + .def("__le__", &MessageId_le) + .def("__lt__", &MessageId_lt) + .def("__ge__", &MessageId_ge) + .def("__gt__", &MessageId_gt) + .def("ledger_id", &MessageId::ledgerId) + .def("entry_id", &MessageId::entryId) + .def("batch_index", &MessageId::batchIndex) + .def("partition", &MessageId::partition) + .add_static_property("earliest", make_getter(&_MessageId_earliest)) + .add_static_property("latest", make_getter(&_MessageId_latest)) + .def("serialize", 
&MessageId_serialize) + .def("deserialize", &MessageId::deserialize) + .staticmethod("deserialize"); class_("Message") - .def("properties", &Message_properties) - .def("data", &Message_data) - .def("length", &Message::getLength) - .def("partition_key", &Message::getPartitionKey, return_value_policy()) - .def("publish_timestamp", &Message::getPublishTimestamp) - .def("event_timestamp", &Message::getEventTimestamp) - .def("message_id", &Message_getMessageId, return_value_policy()) - .def("__str__", &Message_str) - .def("topic_name", &Topic_name_str) - .def("redelivery_count", &Message::getRedeliveryCount) - .def("schema_version", &schema_version_str) - ; - - MessageBatch& (MessageBatch::*MessageBatchParseFromString)(const std::string& payload, uint32_t batchSize) = &MessageBatch::parseFrom; + .def("properties", &Message_properties) + .def("data", &Message_data) + .def("length", &Message::getLength) + .def("partition_key", &Message::getPartitionKey, return_value_policy()) + .def("publish_timestamp", &Message::getPublishTimestamp) + .def("event_timestamp", &Message::getEventTimestamp) + .def("message_id", &Message_getMessageId, return_value_policy()) + .def("__str__", &Message_str) + .def("topic_name", &Topic_name_str) + .def("redelivery_count", &Message::getRedeliveryCount) + .def("schema_version", &schema_version_str); + + MessageBatch& (MessageBatch::*MessageBatchParseFromString)(const std::string& payload, + uint32_t batchSize) = &MessageBatch::parseFrom; class_("MessageBatch") - .def("with_message_id", &MessageBatch::withMessageId, return_self<>()) - .def("parse_from", MessageBatchParseFromString, return_self<>()) - .def("messages", &MessageBatch::messages, return_value_policy()) - ; + .def("with_message_id", &MessageBatch::withMessageId, return_self<>()) + .def("parse_from", MessageBatchParseFromString, return_self<>()) + .def("messages", &MessageBatch::messages, return_value_policy()); - class_ >("Messages") - .def(vector_indexing_suite >() ); + class_ 
>("Messages").def(vector_indexing_suite >()); } diff --git a/pulsar-client-cpp/python/src/producer.cc b/pulsar-client-cpp/python/src/producer.cc index 343650f4b240f..345639e2d951a 100644 --- a/pulsar-client-cpp/python/src/producer.cc +++ b/pulsar-client-cpp/python/src/producer.cc @@ -25,11 +25,10 @@ extern boost::python::object MessageId_serialize(const MessageId& msgId); boost::python::object Producer_send(Producer& producer, const Message& message) { Result res; MessageId messageId; - Py_BEGIN_ALLOW_THREADS - res = producer.send(message, messageId); + Py_BEGIN_ALLOW_THREADS res = producer.send(message, messageId); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); return MessageId_serialize(messageId); } @@ -54,57 +53,55 @@ void Producer_sendAsync(Producer& producer, const Message& message, py::object c PyObject* pyCallback = callback.ptr(); Py_XINCREF(pyCallback); - Py_BEGIN_ALLOW_THREADS - producer.sendAsync(message, std::bind(Producer_sendAsyncCallback, pyCallback, - std::placeholders::_1, std::placeholders::_2)); + Py_BEGIN_ALLOW_THREADS producer.sendAsync( + message, + std::bind(Producer_sendAsyncCallback, pyCallback, std::placeholders::_1, std::placeholders::_2)); Py_END_ALLOW_THREADS } void Producer_flush(Producer& producer) { Result res; - Py_BEGIN_ALLOW_THREADS - res = producer.flush(); + Py_BEGIN_ALLOW_THREADS res = producer.flush(); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); } void Producer_close(Producer& producer) { Result res; - Py_BEGIN_ALLOW_THREADS - res = producer.close(); + Py_BEGIN_ALLOW_THREADS res = producer.close(); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); } void export_producer() { using namespace boost::python; class_("Producer", no_init) - .def("topic", &Producer::getTopic, "return the topic to which producer is publishing to", - return_value_policy()) - .def("producer_name", &Producer::getProducerName, - "return the producer name which could have been assigned by the system or 
specified by the client", - return_value_policy()) - .def("last_sequence_id", &Producer::getLastSequenceId) - .def("send", &Producer_send, - "Publish a message on the topic associated with this Producer.\n" - "\n" - "This method will block until the message will be accepted and persisted\n" - "by the broker. In case of errors, the client library will try to\n" - "automatically recover and use a different broker.\n" - "\n" - "If it wasn't possible to successfully publish the message within the sendTimeout,\n" - "an error will be returned.\n" - "\n" - "This method is equivalent to asyncSend() and wait until the callback is triggered.\n" - "\n" - "@param msg message to publish\n") - .def("send_async", &Producer_sendAsync) - .def("flush", &Producer_flush, - "Flush all the messages buffered in the client and wait until all messages have been\n" - "successfully persisted\n") - .def("close", &Producer_close) - ; + .def("topic", &Producer::getTopic, "return the topic to which producer is publishing to", + return_value_policy()) + .def("producer_name", &Producer::getProducerName, + "return the producer name which could have been assigned by the system or specified by the " + "client", + return_value_policy()) + .def("last_sequence_id", &Producer::getLastSequenceId) + .def("send", &Producer_send, + "Publish a message on the topic associated with this Producer.\n" + "\n" + "This method will block until the message will be accepted and persisted\n" + "by the broker. 
In case of errors, the client library will try to\n" + "automatically recover and use a different broker.\n" + "\n" + "If it wasn't possible to successfully publish the message within the sendTimeout,\n" + "an error will be returned.\n" + "\n" + "This method is equivalent to asyncSend() and wait until the callback is triggered.\n" + "\n" + "@param msg message to publish\n") + .def("send_async", &Producer_sendAsync) + .def("flush", &Producer_flush, + "Flush all the messages buffered in the client and wait until all messages have been\n" + "successfully persisted\n") + .def("close", &Producer_close); } diff --git a/pulsar-client-cpp/python/src/pulsar.cc b/pulsar-client-cpp/python/src/pulsar.cc index a46ce53692d07..e591b738728c5 100644 --- a/pulsar-client-cpp/python/src/pulsar.cc +++ b/pulsar-client-cpp/python/src/pulsar.cc @@ -32,7 +32,6 @@ void export_exceptions(); PyObject* get_exception_class(Result result); - static void translateException(const PulsarException& ex) { std::string err = "Pulsar error: "; err += strResult(ex._result); @@ -40,8 +39,7 @@ static void translateException(const PulsarException& ex) { PyErr_SetString(get_exception_class(ex._result), err.c_str()); } -BOOST_PYTHON_MODULE(_pulsar) -{ +BOOST_PYTHON_MODULE(_pulsar) { py::register_exception_translator(translateException); // Initialize thread support so that we can grab the GIL mutex diff --git a/pulsar-client-cpp/python/src/reader.cc b/pulsar-client-cpp/python/src/reader.cc index fec65da270dd6..668fb9499ed7f 100644 --- a/pulsar-client-cpp/python/src/reader.cc +++ b/pulsar-client-cpp/python/src/reader.cc @@ -24,12 +24,12 @@ Message Reader_readNext(Reader& reader) { while (true) { Py_BEGIN_ALLOW_THREADS - // Use 100ms timeout to periodically check whether the - // interpreter was interrupted - res = reader.readNext(msg, 100); + // Use 100ms timeout to periodically check whether the + // interpreter was interrupted + res = reader.readNext(msg, 100); Py_END_ALLOW_THREADS - if (res != 
ResultTimeout) { + if (res != ResultTimeout) { // In case of timeout we keep calling receive() to simulate a // blocking call until a message is available, while breaking // every once in a while to check the Python signal status @@ -49,62 +49,56 @@ Message Reader_readNext(Reader& reader) { Message Reader_readNextTimeout(Reader& reader, int timeoutMs) { Message msg; Result res; - Py_BEGIN_ALLOW_THREADS - res = reader.readNext(msg, timeoutMs); + Py_BEGIN_ALLOW_THREADS res = reader.readNext(msg, timeoutMs); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); return msg; } bool Reader_hasMessageAvailable(Reader& reader) { bool available = false; Result res; - Py_BEGIN_ALLOW_THREADS - res = reader.hasMessageAvailable(available); + Py_BEGIN_ALLOW_THREADS res = reader.hasMessageAvailable(available); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); return available; } void Reader_close(Reader& reader) { Result res; - Py_BEGIN_ALLOW_THREADS - res = reader.close(); + Py_BEGIN_ALLOW_THREADS res = reader.close(); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); } void Reader_seek(Reader& reader, const MessageId& msgId) { Result res; - Py_BEGIN_ALLOW_THREADS - res = reader.seek(msgId); + Py_BEGIN_ALLOW_THREADS res = reader.seek(msgId); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); } void Reader_seek_timestamp(Reader& reader, uint64_t timestamp) { Result res; - Py_BEGIN_ALLOW_THREADS - res = reader.seek(timestamp); + Py_BEGIN_ALLOW_THREADS res = reader.seek(timestamp); Py_END_ALLOW_THREADS - CHECK_RESULT(res); + CHECK_RESULT(res); } void export_reader() { using namespace boost::python; class_("Reader", no_init) - .def("topic", &Reader::getTopic, return_value_policy()) - .def("read_next", &Reader_readNext) - .def("read_next", &Reader_readNextTimeout) - .def("has_message_available", &Reader_hasMessageAvailable) - .def("close", &Reader_close) - .def("seek", &Reader_seek) - .def("seek", &Reader_seek_timestamp) - ; + 
.def("topic", &Reader::getTopic, return_value_policy()) + .def("read_next", &Reader_readNext) + .def("read_next", &Reader_readNextTimeout) + .def("has_message_available", &Reader_hasMessageAvailable) + .def("close", &Reader_close) + .def("seek", &Reader_seek) + .def("seek", &Reader_seek_timestamp); } diff --git a/pulsar-client-cpp/python/src/schema.cc b/pulsar-client-cpp/python/src/schema.cc index 397ec658d23da..cdfcda6aff14b 100644 --- a/pulsar-client-cpp/python/src/schema.cc +++ b/pulsar-client-cpp/python/src/schema.cc @@ -21,10 +21,8 @@ void export_schema() { using namespace boost::python; - class_("SchemaInfo", - init()) - .def("schema_type", &SchemaInfo::getSchemaType) - .def("name", &SchemaInfo::getName, return_value_policy()) - .def("schema", &SchemaInfo::getSchema, return_value_policy()) - ; + class_("SchemaInfo", init()) + .def("schema_type", &SchemaInfo::getSchemaType) + .def("name", &SchemaInfo::getName, return_value_policy()) + .def("schema", &SchemaInfo::getSchema, return_value_policy()); } diff --git a/pulsar-client-cpp/python/src/utils.h b/pulsar-client-cpp/python/src/utils.h index 457d1f85382b0..5be44732fb704 100644 --- a/pulsar-client-cpp/python/src/utils.h +++ b/pulsar-client-cpp/python/src/utils.h @@ -27,8 +27,7 @@ namespace py = boost::python; struct PulsarException { Result _result; - PulsarException(Result res) : - _result(res) {} + PulsarException(Result res) : _result(res) {} }; inline void CHECK_RESULT(Result res) { diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionBufferProviderException.java b/pulsar-client-cpp/templates/Version.h.in similarity index 74% rename from pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionBufferProviderException.java rename to pulsar-client-cpp/templates/Version.h.in index e7a7a62cc43af..d52121ac8c19e 100644 --- 
a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionBufferProviderException.java +++ b/pulsar-client-cpp/templates/Version.h.in @@ -16,15 +16,13 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.pulsar.broker.transaction.buffer.exceptions; /** - * Transaction buffer provider exception. + * @PVM_COMMENT@ */ -public class TransactionBufferProviderException extends TransactionBufferException { +#ifndef PULSAR_VERSION_H_ +#define PULSAR_VERSION_H_ - public TransactionBufferProviderException(String message) { - super(message); - } +#define PULSAR_VERSION @PVM@ -} +#endif /* PULSAR_VERSION_H_ */ diff --git a/pulsar-client-cpp/tests/AuthPluginTest.cc b/pulsar-client-cpp/tests/AuthPluginTest.cc index be987e07c4860..01c19ebbea484 100644 --- a/pulsar-client-cpp/tests/AuthPluginTest.cc +++ b/pulsar-client-cpp/tests/AuthPluginTest.cc @@ -412,6 +412,26 @@ TEST(AuthPluginTest, testOauth2RequestBody) { ASSERT_EQ(flow2.generateParamMap(), expectedResult2); } +TEST(AuthPluginTest, testInitialize) { + std::string issuerUrl = "https://dev-kt-aa9ne.us.auth0.com"; + std::string expectedTokenEndPoint = issuerUrl + "/oauth/token"; + + ParamMap params; + params["issuer_url"] = issuerUrl; + params["client_id"] = "Xd23RHsUnvUlP7wchjNYOaIfazgeHd9x"; + params["client_secret"] = "rT7ps7WY8uhdVuBTKWZkttwLdQotmdEliaM5rLfmgNibvqziZ-g07ZH52N_poGAb"; + params["audience"] = "https://dev-kt-aa9ne.us.auth0.com/api/v2/"; + + ClientCredentialFlow flow1(params); + flow1.initialize(); + ASSERT_EQ(flow1.getTokenEndPoint(), expectedTokenEndPoint); + + params["issuer_url"] = issuerUrl + "/"; + ClientCredentialFlow flow2(params); + flow2.initialize(); + ASSERT_EQ(flow2.getTokenEndPoint(), expectedTokenEndPoint); +} + TEST(AuthPluginTest, testOauth2Failure) { ParamMap params; auto addKeyValue = [&](const std::string& key, const std::string& value) { diff --git a/pulsar-client-cpp/tests/AuthTokenTest.cc 
b/pulsar-client-cpp/tests/AuthTokenTest.cc index 1b136b093e5f1..ede5e818d3da6 100644 --- a/pulsar-client-cpp/tests/AuthTokenTest.cc +++ b/pulsar-client-cpp/tests/AuthTokenTest.cc @@ -176,11 +176,11 @@ TEST(AuthPluginToken, testNoAuth) { Producer producer; Result result = client.createProducer(topicName, producer); - ASSERT_EQ(ResultConnectError, result); + ASSERT_EQ(ResultAuthorizationError, result); Consumer consumer; result = client.subscribe(topicName, subName, consumer); - ASSERT_EQ(ResultConnectError, result); + ASSERT_EQ(ResultAuthorizationError, result); } TEST(AuthPluginToken, testNoAuthWithHttp) { diff --git a/pulsar-client-cpp/tests/BasicEndToEndTest.cc b/pulsar-client-cpp/tests/BasicEndToEndTest.cc index be6577523ed7d..b8cfcdb0fd0b5 100644 --- a/pulsar-client-cpp/tests/BasicEndToEndTest.cc +++ b/pulsar-client-cpp/tests/BasicEndToEndTest.cc @@ -1708,7 +1708,7 @@ TEST(BasicEndToEndTest, testSeekOnPartitionedTopic) { ASSERT_EQ(expected.str(), msgReceived.getDataAsString()); ASSERT_EQ(ResultOk, consumer.acknowledge(msgReceived)); ASSERT_EQ(ResultOk, consumer.unsubscribe()); - ASSERT_EQ(ResultOk, consumer.close()); + ASSERT_EQ(ResultAlreadyClosed, consumer.close()); ASSERT_EQ(ResultOk, producer.close()); ASSERT_EQ(ResultOk, client.close()); } diff --git a/pulsar-client-cpp/tests/BatchMessageTest.cc b/pulsar-client-cpp/tests/BatchMessageTest.cc index ebe6bf159ba63..62fd5fff25c2d 100644 --- a/pulsar-client-cpp/tests/BatchMessageTest.cc +++ b/pulsar-client-cpp/tests/BatchMessageTest.cc @@ -1071,3 +1071,81 @@ TEST(BatchMessageTest, testProducerQueueWithBatches) { ASSERT_EQ(rejectedMessges, 10); } + +TEST(BatchMessageTest, testSingleMessageMetadata) { + const auto topic = "BatchMessageTest-SingleMessageMetadata-" + std::to_string(time(nullptr)); + constexpr int numMessages = 3; + + Client client(lookupUrl); + + Consumer consumer; + ASSERT_EQ(ResultOk, client.subscribe(topic, "sub", consumer)); + + Producer producer; + ASSERT_EQ(ResultOk, client.createProducer( + 
topic, ProducerConfiguration().setBatchingMaxMessages(numMessages), producer)); + + producer.sendAsync(MessageBuilder() + .setContent("msg-0") + .setPartitionKey("key-0") + .setOrderingKey("ordering-key-0") + .setEventTimestamp(10UL) + .setProperty("k0", "v0") + .setProperty("k1", "v1") + .build(), + nullptr); + producer.sendAsync(MessageBuilder() + .setContent("msg-1") + .setOrderingKey("ordering-key-1") + .setEventTimestamp(11UL) + .setProperty("k2", "v2") + .build(), + nullptr); + producer.sendAsync(MessageBuilder().setContent("msg-2").build(), nullptr); + ASSERT_EQ(ResultOk, producer.flush()); + + Message msgs[numMessages]; + for (int i = 0; i < numMessages; i++) { + Message msg; + ASSERT_EQ(ResultOk, consumer.receive(msg, 3000)); + msgs[i] = msg; + LOG_INFO("message " << i << ": " << msg.getDataAsString() + << ", key: " << (msg.hasPartitionKey() ? msg.getPartitionKey() : "(null)") + << ", ordering key: " << (msg.hasOrderingKey() ? msg.getOrderingKey() : "(null)") + << ", event time: " << (msg.getEventTimestamp()) + << ", properties count: " << msg.getProperties().size() + << ", has schema version: " << msg.hasSchemaVersion()); + } + + ASSERT_EQ(msgs[0].getDataAsString(), "msg-0"); + ASSERT_TRUE(msgs[0].hasPartitionKey()); + ASSERT_EQ(msgs[0].getPartitionKey(), "key-0"); + ASSERT_TRUE(msgs[0].hasOrderingKey()); + ASSERT_EQ(msgs[0].getOrderingKey(), "ordering-key-0"); + ASSERT_EQ(msgs[0].getEventTimestamp(), 10UL); + ASSERT_EQ(msgs[0].getProperties().size(), 2); + ASSERT_TRUE(msgs[0].hasProperty("k0")); + ASSERT_EQ(msgs[0].getProperty("k0"), "v0"); + ASSERT_TRUE(msgs[0].hasProperty("k1")); + ASSERT_EQ(msgs[0].getProperty("k1"), "v1"); + ASSERT_FALSE(msgs[0].hasSchemaVersion()); + + ASSERT_EQ(msgs[1].getDataAsString(), "msg-1"); + ASSERT_FALSE(msgs[1].hasPartitionKey()); + ASSERT_TRUE(msgs[1].hasOrderingKey()); + ASSERT_EQ(msgs[1].getOrderingKey(), "ordering-key-1"); + ASSERT_EQ(msgs[1].getEventTimestamp(), 11UL); + ASSERT_EQ(msgs[1].getProperties().size(), 1); + 
ASSERT_TRUE(msgs[1].hasProperty("k2")); + ASSERT_EQ(msgs[1].getProperty("k2"), "v2"); + ASSERT_FALSE(msgs[1].hasSchemaVersion()); + + ASSERT_EQ(msgs[2].getDataAsString(), "msg-2"); + ASSERT_FALSE(msgs[2].hasPartitionKey()); + ASSERT_FALSE(msgs[2].hasOrderingKey()); + ASSERT_EQ(msgs[2].getEventTimestamp(), 0UL); + ASSERT_EQ(msgs[2].getProperties().size(), 0); + ASSERT_FALSE(msgs[2].hasSchemaVersion()); + + client.close(); +} diff --git a/pulsar-client-cpp/tests/ClientTest.cc b/pulsar-client-cpp/tests/ClientTest.cc index 8f5e68b84a18d..2bc66c6da2be6 100644 --- a/pulsar-client-cpp/tests/ClientTest.cc +++ b/pulsar-client-cpp/tests/ClientTest.cc @@ -19,10 +19,14 @@ #include #include "HttpHelper.h" +#include "PulsarFriend.h" #include #include #include "../lib/checksum/ChecksumProvider.h" +#include "lib/LogUtils.h" + +DECLARE_LOG_OBJECT() using namespace pulsar; @@ -176,3 +180,91 @@ TEST(ClientTest, testGetNumberOfReferences) { client.close(); } + +TEST(ClientTest, testReferenceCount) { + Client client(lookupUrl); + const std::string topic = "client-test-reference-count-" + std::to_string(time(nullptr)); + + auto &producers = PulsarFriend::getProducers(client); + auto &consumers = PulsarFriend::getConsumers(client); + ReaderImplWeakPtr readerWeakPtr; + + { + Producer producer; + ASSERT_EQ(ResultOk, client.createProducer(topic, producer)); + ASSERT_EQ(producers.size(), 1); + ASSERT_TRUE(producers[0].use_count() > 0); + LOG_INFO("Reference count of the producer: " << producers[0].use_count()); + + Consumer consumer; + ASSERT_EQ(ResultOk, client.subscribe(topic, "my-sub", consumer)); + ASSERT_EQ(consumers.size(), 1); + ASSERT_TRUE(consumers[0].use_count() > 0); + LOG_INFO("Reference count of the consumer: " << consumers[0].use_count()); + + ReaderConfiguration readerConf; + Reader reader; + ASSERT_EQ(ResultOk, + client.createReader(topic + "-reader", MessageId::earliest(), readerConf, reader)); + ASSERT_EQ(consumers.size(), 2); + ASSERT_TRUE(consumers[1].use_count() > 0); + 
LOG_INFO("Reference count of the reader's underlying consumer: " << consumers[1].use_count()); + + readerWeakPtr = PulsarFriend::getReaderImplWeakPtr(reader); + ASSERT_TRUE(readerWeakPtr.use_count() > 0); + LOG_INFO("Reference count of the reader: " << readerWeakPtr.use_count()); + } + + ASSERT_EQ(producers.size(), 1); + ASSERT_EQ(producers[0].use_count(), 0); + ASSERT_EQ(consumers.size(), 2); + ASSERT_EQ(consumers[0].use_count(), 0); + ASSERT_EQ(consumers[1].use_count(), 0); + ASSERT_EQ(readerWeakPtr.use_count(), 0); + client.close(); +} + +TEST(ClientTest, testWrongListener) { + const std::string topic = "client-test-wrong-listener-" + std::to_string(time(nullptr)); + auto httpCode = makePutRequest( + "http://localhost:8080/admin/v2/persistent/public/default/" + topic + "/partitions", "3"); + LOG_INFO("create " << topic << ": " << httpCode); + + Client client(lookupUrl, ClientConfiguration().setListenerName("test")); + Producer producer; + ASSERT_EQ(ResultServiceUnitNotReady, client.createProducer(topic, producer)); + ASSERT_EQ(ResultProducerNotInitialized, producer.close()); + ASSERT_EQ(PulsarFriend::getProducers(client).size(), 0); + ASSERT_EQ(ResultOk, client.close()); + + // The connection will be closed when the consumer failed, we must recreate the Client. Otherwise, the + // creation of Consumer or Reader could fail with ResultConnectError. 
+ client = Client(lookupUrl, ClientConfiguration().setListenerName("test")); + Consumer consumer; + ASSERT_EQ(ResultServiceUnitNotReady, client.subscribe(topic, "sub", consumer)); + ASSERT_EQ(ResultConsumerNotInitialized, consumer.close()); + + ASSERT_EQ(PulsarFriend::getConsumers(client).size(), 0); + ASSERT_EQ(ResultOk, client.close()); + + client = Client(lookupUrl, ClientConfiguration().setListenerName("test")); + + Consumer multiTopicsConsumer; + ASSERT_EQ(ResultServiceUnitNotReady, + client.subscribe({topic + "-partition-0", topic + "-partition-1", topic + "-partition-2"}, + "sub", multiTopicsConsumer)); + + ASSERT_EQ(PulsarFriend::getConsumers(client).size(), 0); + ASSERT_EQ(ResultOk, client.close()); + + // Currently Reader can only read a non-partitioned topic in C++ client + client = Client(lookupUrl, ClientConfiguration().setListenerName("test")); + + // Currently Reader can only read a non-partitioned topic in C++ client + Reader reader; + ASSERT_EQ(ResultServiceUnitNotReady, + client.createReader(topic + "-partition-0", MessageId::earliest(), {}, reader)); + ASSERT_EQ(ResultConsumerNotInitialized, reader.close()); + ASSERT_EQ(PulsarFriend::getConsumers(client).size(), 0); + ASSERT_EQ(ResultOk, client.close()); +} diff --git a/pulsar-client-cpp/tests/ConsumerStatsTest.cc b/pulsar-client-cpp/tests/ConsumerStatsTest.cc index ada86760a2a02..c398a532e68cc 100644 --- a/pulsar-client-cpp/tests/ConsumerStatsTest.cc +++ b/pulsar-client-cpp/tests/ConsumerStatsTest.cc @@ -20,15 +20,12 @@ #include #include #include -#include "CustomRoutingPolicy.h" #include "lib/Future.h" #include "lib/Utils.h" #include "PulsarFriend.h" #include "ConsumerTest.h" #include "HttpHelper.h" #include -#include -#include #include #include @@ -42,8 +39,8 @@ static std::string adminUrl = "http://localhost:8080/"; void partitionedCallbackFunction(Result result, BrokerConsumerStats brokerConsumerStats, long expectedBacklog, Latch& latch, int index, bool accurate) { ASSERT_EQ(result, 
ResultOk); - PartitionedBrokerConsumerStatsImpl* statsPtr = - (PartitionedBrokerConsumerStatsImpl*)(brokerConsumerStats.getImpl().get()); + MultiTopicsBrokerConsumerStatsImpl* statsPtr = + (MultiTopicsBrokerConsumerStatsImpl*)(brokerConsumerStats.getImpl().get()); LOG_DEBUG(statsPtr); if (accurate) { ASSERT_EQ(expectedBacklog, statsPtr->getBrokerConsumerStats(index).getMsgBacklog()); diff --git a/pulsar-client-cpp/tests/ConsumerTest.cc b/pulsar-client-cpp/tests/ConsumerTest.cc index 2747b2dcedff8..b1fc11cec8fe3 100644 --- a/pulsar-client-cpp/tests/ConsumerTest.cc +++ b/pulsar-client-cpp/tests/ConsumerTest.cc @@ -30,7 +30,6 @@ #include "lib/Future.h" #include "lib/Utils.h" #include "lib/LogUtils.h" -#include "lib/PartitionedConsumerImpl.h" #include "lib/MultiTopicsConsumerImpl.h" #include "HttpHelper.h" @@ -406,8 +405,9 @@ TEST(ConsumerTest, testPartitionedConsumerUnAckedMessageRedelivery) { consumerConfig.setUnAckedMessagesTimeoutMs(unAckedMessagesTimeoutMs); consumerConfig.setTickDurationInMs(tickDurationInMs); ASSERT_EQ(ResultOk, client.subscribe(partitionedTopic, subName, consumerConfig, consumer)); - PartitionedConsumerImplPtr partitionedConsumerImplPtr = - PulsarFriend::getPartitionedConsumerImplPtr(consumer); + + MultiTopicsConsumerImplPtr partitionedConsumerImplPtr = + PulsarFriend::getMultiTopicsConsumerImplPtr(consumer); ASSERT_EQ(numPartitions, partitionedConsumerImplPtr->consumers_.size()); // send messages @@ -442,8 +442,10 @@ TEST(ConsumerTest, testPartitionedConsumerUnAckedMessageRedelivery) { ASSERT_EQ(numOfMessages, partitionedTracker->size()); ASSERT_FALSE(partitionedTracker->isEmpty()); for (auto i = 0; i < numPartitions; i++) { + auto topicName = + "persistent://public/default/" + partitionedTopic + "-partition-" + std::to_string(i); ASSERT_EQ(numOfMessages / numPartitions, messageIds[i].size()); - auto subConsumerPtr = partitionedConsumerImplPtr->consumers_[i]; + auto subConsumerPtr = 
partitionedConsumerImplPtr->consumers_.find(topicName).value(); auto tracker = static_cast(subConsumerPtr->unAckedMessageTrackerPtr_.get()); ASSERT_EQ(0, tracker->size()); @@ -530,11 +532,14 @@ TEST(ConsumerTest, testMultiTopicsConsumerUnAckedMessageRedelivery) { multiTopicsConsumerImplPtr->unAckedMessageTrackerPtr_.get()); ASSERT_EQ(numOfMessages * 3, multiTopicsTracker->size()); ASSERT_FALSE(multiTopicsTracker->isEmpty()); - for (auto iter = multiTopicsConsumerImplPtr->consumers_.begin(); - iter != multiTopicsConsumerImplPtr->consumers_.end(); ++iter) { - auto subConsumerPtr = iter->second; - auto tracker = - static_cast(subConsumerPtr->unAckedMessageTrackerPtr_.get()); + + std::vector trackers; + multiTopicsConsumerImplPtr->consumers_.forEach( + [&trackers](const std::string& name, const ConsumerImplPtr& consumer) { + trackers.emplace_back( + static_cast(consumer->unAckedMessageTrackerPtr_.get())); + }); + for (const auto& tracker : trackers) { ASSERT_EQ(0, tracker->size()); ASSERT_TRUE(tracker->isEmpty()); } @@ -660,7 +665,7 @@ TEST(ConsumerTest, testGetTopicNameFromReceivedMessage) { // 2. 
MultiTopicsConsumerImpl Consumer consumer2; - ASSERT_EQ(ResultOk, client.subscribe({topic1, topic2}, "sub-2", consumer2)); + ASSERT_EQ(ResultOk, client.subscribe(std::vector{topic1, topic2}, "sub-2", consumer2)); sendMessage(topic1, true); validateTopicName(consumer1, topic1); diff --git a/pulsar-client-cpp/tests/CustomLoggerTest.cc b/pulsar-client-cpp/tests/CustomLoggerTest.cc index 0b4e76adcc4bb..bd80c312e3ba8 100644 --- a/pulsar-client-cpp/tests/CustomLoggerTest.cc +++ b/pulsar-client-cpp/tests/CustomLoggerTest.cc @@ -20,6 +20,7 @@ #include #include #include +#include #include using namespace pulsar; @@ -28,35 +29,42 @@ static std::vector logLines; class MyTestLogger : public Logger { public: - MyTestLogger() = default; + MyTestLogger(const std::string &fileName) : fileName_(fileName) {} bool isEnabled(Level level) override { return true; } void log(Level level, int line, const std::string &message) override { std::stringstream ss; - ss << " " << level << ":" << line << " " << message << std::endl; + ss << std::this_thread::get_id() << " " << level << " " << fileName_ << ":" << line << " " << message + << std::endl; logLines.emplace_back(ss.str()); } + + private: + const std::string fileName_; }; class MyTestLoggerFactory : public LoggerFactory { public: - Logger *getLogger(const std::string &fileName) override { return logger; } - - private: - MyTestLogger *logger = new MyTestLogger; + Logger *getLogger(const std::string &fileName) override { return new MyTestLogger(fileName); } }; TEST(CustomLoggerTest, testCustomLogger) { // simulate new client created on a different thread (because logging factory is called once per thread) - auto testThread = std::thread([] { + std::atomic_int numLogLines{0}; + auto testThread = std::thread([&numLogLines] { ClientConfiguration clientConfig; auto customLogFactory = new MyTestLoggerFactory(); clientConfig.setLogger(customLogFactory); // reset to previous log factory Client client("pulsar://localhost:6650", clientConfig); 
client.close(); - ASSERT_EQ(logLines.size(), 7); + ASSERT_TRUE(logLines.size() > 0); + for (auto &&line : logLines) { + std::cout << line; + std::cout.flush(); + } + numLogLines = logLines.size(); LogUtils::resetLoggerFactory(); }); testThread.join(); @@ -65,7 +73,7 @@ TEST(CustomLoggerTest, testCustomLogger) { Client client("pulsar://localhost:6650", clientConfig); client.close(); // custom logger didn't get any new lines - ASSERT_EQ(logLines.size(), 7); + ASSERT_EQ(logLines.size(), numLogLines); } TEST(CustomLoggerTest, testConsoleLoggerFactory) { diff --git a/pulsar-client-cpp/tests/KeyBasedBatchingTest.cc b/pulsar-client-cpp/tests/KeyBasedBatchingTest.cc index 3bec21ac3d738..fcb558a7dadb9 100644 --- a/pulsar-client-cpp/tests/KeyBasedBatchingTest.cc +++ b/pulsar-client-cpp/tests/KeyBasedBatchingTest.cc @@ -41,7 +41,6 @@ class KeyBasedBatchingTest : public ::testing::Test { void TearDown() override { client_.close(); } - void setTopicName(const std::string& topicName) { topicName_ = topicName; } void initTopicName(const std::string& testName) { topicName_ = "KeyBasedBatchingTest-" + testName + "-" + std::to_string(time(nullptr)); } @@ -179,3 +178,34 @@ TEST_F(KeyBasedBatchingTest, testSingleBatch) { ASSERT_EQ(ResultTimeout, consumer_.receive(msg, 3000)); ASSERT_EQ(numMessageSent.load(), numMessages); } + +TEST_F(KeyBasedBatchingTest, testCloseBeforeSend) { + initTopicName("CloseBeforeSend"); + // Any asynchronous send won't be completed unless `close()` or `flush()` is triggered + initProducer(createDefaultProducerConfig().setBatchingMaxMessages(static_cast(-1))); + + std::mutex mtx; + std::vector results; + auto saveResult = [&mtx, &results](Result result) { + std::lock_guard lock(mtx); + results.emplace_back(result); + }; + auto sendAsync = [saveResult, this](const std::string& key, const std::string& value) { + producer_.sendAsync(MessageBuilder().setOrderingKey(key).setContent(value).build(), + [saveResult](Result result, const MessageId& id) { 
saveResult(result); }); + }; + + constexpr int numKeys = 10; + for (int i = 0; i < numKeys; i++) { + sendAsync("key-" + std::to_string(i), "value"); + } + + ASSERT_EQ(ResultOk, producer_.close()); + + // After close() completed, all callbacks should have failed with ResultAlreadyClosed + std::lock_guard lock(mtx); + ASSERT_EQ(results.size(), numKeys); + for (int i = 0; i < numKeys; i++) { + ASSERT_EQ(results[i], ResultAlreadyClosed) << " results[" << i << "] is " << results[i]; + } +} diff --git a/pulsar-client-cpp/tests/MessageIdTest.cc b/pulsar-client-cpp/tests/MessageIdTest.cc index 06c25283794aa..55fa181da05f4 100644 --- a/pulsar-client-cpp/tests/MessageIdTest.cc +++ b/pulsar-client-cpp/tests/MessageIdTest.cc @@ -17,6 +17,7 @@ * under the License. */ #include +#include "lib/MessageIdUtil.h" #include "PulsarFriend.h" #include @@ -35,3 +36,24 @@ TEST(MessageIdTest, testSerialization) { ASSERT_EQ(msgId, deserialized); } + +TEST(MessageIdTest, testCompareLedgerAndEntryId) { + MessageId id1(-1, 2L, 1L, 0); + MessageId id2(-1, 2L, 1L, 1); + MessageId id3(-1, 2L, 2L, 0); + MessageId id4(-1, 3L, 0L, 0); + ASSERT_EQ(compareLedgerAndEntryId(id1, id2), 0); + ASSERT_EQ(compareLedgerAndEntryId(id1, id2), 0); + + ASSERT_EQ(compareLedgerAndEntryId(id1, id3), -1); + ASSERT_EQ(compareLedgerAndEntryId(id3, id1), 1); + + ASSERT_EQ(compareLedgerAndEntryId(id1, id4), -1); + ASSERT_EQ(compareLedgerAndEntryId(id4, id1), 1); + + ASSERT_EQ(compareLedgerAndEntryId(id2, id4), -1); + ASSERT_EQ(compareLedgerAndEntryId(id4, id2), 1); + + ASSERT_EQ(compareLedgerAndEntryId(id3, id4), -1); + ASSERT_EQ(compareLedgerAndEntryId(id4, id3), 1); +} diff --git a/pulsar-client-cpp/tests/PeriodicTaskTest.cc b/pulsar-client-cpp/tests/PeriodicTaskTest.cc index 11c1c62ec3f2b..2c1da70e80e3c 100644 --- a/pulsar-client-cpp/tests/PeriodicTaskTest.cc +++ b/pulsar-client-cpp/tests/PeriodicTaskTest.cc @@ -29,11 +29,11 @@ DECLARE_LOG_OBJECT() using namespace pulsar; TEST(PeriodicTaskTest, testCountdownTask) { - 
ExecutorService executor; + auto executor = ExecutorService::create(); std::atomic_int count{5}; - auto task = std::make_shared(executor.getIOService(), 200); + auto task = std::make_shared(executor->getIOService(), 200); task->setCallback([task, &count](const PeriodicTask::ErrorCode& ec) { if (--count <= 0) { task->stop(); @@ -56,13 +56,13 @@ TEST(PeriodicTaskTest, testCountdownTask) { ASSERT_EQ(count.load(), 0); task->stop(); - executor.close(); + executor->close(); } TEST(PeriodicTaskTest, testNegativePeriod) { - ExecutorService executor; + auto executor = ExecutorService::create(); - auto task = std::make_shared(executor.getIOService(), -1); + auto task = std::make_shared(executor->getIOService(), -1); std::atomic_bool callbackTriggered{false}; task->setCallback([&callbackTriggered](const PeriodicTask::ErrorCode& ec) { callbackTriggered = true; }); @@ -71,5 +71,5 @@ TEST(PeriodicTaskTest, testNegativePeriod) { ASSERT_EQ(callbackTriggered.load(), false); task->stop(); - executor.close(); + executor->close(); } diff --git a/pulsar-client-cpp/tests/ProducerTest.cc b/pulsar-client-cpp/tests/ProducerTest.cc index 210f01345d4af..9ddca1f704294 100644 --- a/pulsar-client-cpp/tests/ProducerTest.cc +++ b/pulsar-client-cpp/tests/ProducerTest.cc @@ -159,85 +159,46 @@ TEST(ProducerTest, testSendAsyncAfterCloseAsyncWithLazyProducers) { ASSERT_EQ(ResultOk, result); } -TEST(ProducerTest, testSendAsyncCloseAsyncConcurrentlyWithLazyProducers) { - // run sendAsync and closeAsync concurrently and verify that all sendAsync callbacks are called - // and that messages sent after closeAsync is invoked receive ResultAlreadyClosed. 
- for (int run = 0; run < 20; run++) { - LOG_INFO("Start of run " << run); - Client client(serviceUrl); - const std::string partitionedTopic = - "testProducerIsConnectedPartitioned-" + std::to_string(time(nullptr)); - - int res = makePutRequest( - adminUrl + "admin/v2/persistent/public/default/" + partitionedTopic + "/partitions", "10"); - ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; - - ProducerConfiguration producerConfiguration; - producerConfiguration.setLazyStartPartitionedProducers(true); - producerConfiguration.setPartitionsRoutingMode(ProducerConfiguration::UseSinglePartition); - producerConfiguration.setBatchingEnabled(true); - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(partitionedTopic, producerConfiguration, producer)); - - int sendCount = 100; - std::vector> promises(sendCount); - Promise promiseClose; - - // only call closeAsync once at least 10 messages have been sent - Latch sendStartLatch(10); - Latch closeLatch(1); - int closedAt = 0; - - std::thread t1([&]() { - for (int i = 0; i < sendCount; i++) { - sendStartLatch.countdown(); - Message msg = MessageBuilder().setContent("test").build(); - - if (closeLatch.getCount() == 0 && closedAt == 0) { - closedAt = i; - LOG_INFO("closedAt set to " << closedAt) - } - - producer.sendAsync(msg, WaitForCallbackValue(promises[i])); - std::this_thread::sleep_for(std::chrono::milliseconds(1)); - } - }); - - std::thread t2([&]() { - sendStartLatch.wait(std::chrono::milliseconds(1000)); - LOG_INFO("Closing"); - producer.closeAsync(WaitForCallback(promiseClose)); - LOG_INFO("Close called"); - closeLatch.countdown(); - Result result; - promiseClose.getFuture().get(result); - ASSERT_EQ(ResultOk, result); - LOG_INFO("Closed"); - }); - - t1.join(); - t2.join(); - - // make sure that all messages after the moment when closeAsync was invoked - // return AlreadyClosed - for (int i = 0; i < sendCount; i++) { - LOG_DEBUG("Checking " << i) - - // whether a message was sent successfully or not, 
it's callback - // must have been invoked - ASSERT_EQ(true, promises[i].isComplete()); - MessageId mi; - Result res = promises[i].getFuture().get(mi); - LOG_DEBUG("Result is " << res); - - // for the messages sent after closeAsync was invoked, they - // should all return ResultAlreadyClosed - if (i >= closedAt) { - ASSERT_EQ(ResultAlreadyClosed, res); - } - } - - client.close(); - LOG_INFO("End of run " << run); - } -} \ No newline at end of file +TEST(ProducerTest, testBacklogQuotasExceeded) { + std::string ns = "public/test-backlog-quotas"; + std::string topic = ns + "/testBacklogQuotasExceeded" + std::to_string(time(nullptr)); + + int res = makePutRequest(adminUrl + "admin/v2/persistent/" + topic + "/partitions", "5"); + ASSERT_TRUE(res == 204 || res == 409) << "res: " << res; + LOG_INFO("Created topic " << topic << " with 5 partitions"); + + auto setBacklogPolicy = [&ns](const std::string& policy, int limitSize) { + const auto body = + R"({"policy":")" + policy + R"(","limitSize":)" + std::to_string(limitSize) + "}"; + int res = makePostRequest(adminUrl + "admin/v2/namespaces/" + ns + "/backlogQuota", body); + LOG_INFO(res << " | Change the backlog policy to: " << body); + ASSERT_TRUE(res == 204 || res == 409); + }; + + Client client(serviceUrl); + + // Create a topic with backlog size that is greater than 1024 + Consumer consumer; + ASSERT_EQ(ResultOk, client.subscribe(topic, "sub", consumer)); // create a cursor + Producer producer; + + const auto partition = topic + "-partition-0"; + ASSERT_EQ(ResultOk, client.createProducer(partition, producer)); + ASSERT_EQ(ResultOk, producer.send(MessageBuilder().setContent(std::string(1024L, 'a')).build())); + ASSERT_EQ(ResultOk, producer.close()); + ASSERT_EQ(ResultOk, consumer.close()); + + setBacklogPolicy("producer_request_hold", 1024); + ASSERT_EQ(ResultProducerBlockedQuotaExceededError, client.createProducer(topic, producer)); + ASSERT_EQ(ResultProducerBlockedQuotaExceededError, client.createProducer(partition, 
producer)); + + setBacklogPolicy("producer_exception", 1024); + ASSERT_EQ(ResultProducerBlockedQuotaExceededException, client.createProducer(topic, producer)); + ASSERT_EQ(ResultProducerBlockedQuotaExceededException, client.createProducer(partition, producer)); + + setBacklogPolicy("consumer_backlog_eviction", 1024); + ASSERT_EQ(ResultOk, client.createProducer(topic, producer)); + ASSERT_EQ(ResultOk, client.createProducer(partition, producer)); + + client.close(); +} diff --git a/pulsar-client-cpp/tests/PromiseTest.cc b/pulsar-client-cpp/tests/PromiseTest.cc new file mode 100644 index 0000000000000..73c6f8c230846 --- /dev/null +++ b/pulsar-client-cpp/tests/PromiseTest.cc @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +#include +#include +#include +#include +#include +#include + +using namespace pulsar; + +TEST(PromiseTest, testSetValue) { + Promise promise; + std::thread t{[promise] { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + promise.setValue("hello"); + }}; + t.detach(); + + std::string value; + ASSERT_EQ(promise.getFuture().get(value), 0); + ASSERT_EQ(value, "hello"); +} + +TEST(PromiseTest, testSetFailed) { + Promise promise; + std::thread t{[promise] { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + promise.setFailed(-1); + }}; + t.detach(); + + std::string value; + ASSERT_EQ(promise.getFuture().get(value), -1); + ASSERT_EQ(value, ""); +} + +TEST(PromiseTest, testListeners) { + Promise promise; + auto future = promise.getFuture(); + + bool resultSetFailed = true; + bool resultSetValue = true; + std::vector results; + std::vector values; + + future + .addListener([promise, &resultSetFailed, &results, &values](int result, const std::string& value) { + resultSetFailed = promise.setFailed(-1L); + results.emplace_back(result); + values.emplace_back(value); + }) + .addListener([promise, &resultSetValue, &results, &values](int result, const std::string& value) { + resultSetValue = promise.setValue("WRONG"); + results.emplace_back(result); + values.emplace_back(value); + }); + + promise.setValue("hello"); + std::string value; + ASSERT_EQ(future.get(value), 0); + ASSERT_EQ(value, "hello"); + + ASSERT_FALSE(resultSetFailed); + ASSERT_FALSE(resultSetValue); + ASSERT_EQ(results, (std::vector(2, 0))); + ASSERT_EQ(values, (std::vector(2, "hello"))); +} diff --git a/pulsar-client-cpp/tests/ProtobufNativeSchemaTest.cc b/pulsar-client-cpp/tests/ProtobufNativeSchemaTest.cc index ab4fedb79e141..df1f9c6055782 100644 --- a/pulsar-client-cpp/tests/ProtobufNativeSchemaTest.cc +++ b/pulsar-client-cpp/tests/ProtobufNativeSchemaTest.cc @@ -121,6 +121,9 @@ TEST(ProtobufNativeSchemaTest, testEndToEnd) { receivedTestMessage.ParseFromArray(msg.getData(), 
msg.getLength()); ASSERT_EQ(receivedTestMessage.testenum(), ::proto::TestEnum::FAILOVER); + ASSERT_TRUE(msg.hasSchemaVersion()); + ASSERT_EQ(msg.getSchemaVersion(), std::string(8L, '\0')); + client.close(); } diff --git a/pulsar-client-cpp/tests/PulsarFriend.h b/pulsar-client-cpp/tests/PulsarFriend.h index aed7096366ad8..9325d3877b566 100644 --- a/pulsar-client-cpp/tests/PulsarFriend.h +++ b/pulsar-client-cpp/tests/PulsarFriend.h @@ -23,8 +23,8 @@ #include "lib/ProducerImpl.h" #include "lib/PartitionedProducerImpl.h" #include "lib/ConsumerImpl.h" -#include "lib/PartitionedConsumerImpl.h" #include "lib/MultiTopicsConsumerImpl.h" +#include "lib/ReaderImpl.h" using std::string; @@ -79,16 +79,26 @@ class PulsarFriend { return std::static_pointer_cast(consumer.impl_); } - static std::shared_ptr getPartitionedConsumerImplPtr(Consumer consumer) { - return std::static_pointer_cast(consumer.impl_); + static ConsumerImplPtr getConsumer(Reader reader) { + return std::static_pointer_cast(reader.impl_->getConsumer().lock()); } + static ReaderImplWeakPtr getReaderImplWeakPtr(Reader reader) { return reader.impl_; } + static std::shared_ptr getMultiTopicsConsumerImplPtr(Consumer consumer) { return std::static_pointer_cast(consumer.impl_); } static std::shared_ptr getClientImplPtr(Client client) { return client.impl_; } + static ClientImpl::ProducersList& getProducers(const Client& client) { + return getClientImplPtr(client)->producers_; + } + + static ClientImpl::ConsumersList& getConsumers(const Client& client) { + return getClientImplPtr(client)->consumers_; + } + static void setNegativeAckEnabled(Consumer consumer, bool enabled) { consumer.impl_->setNegativeAcknowledgeEnabledForTesting(enabled); } diff --git a/pulsar-client-cpp/tests/ReaderTest.cc b/pulsar-client-cpp/tests/ReaderTest.cc index d95038b3cef3c..bf156927590ed 100644 --- a/pulsar-client-cpp/tests/ReaderTest.cc +++ b/pulsar-client-cpp/tests/ReaderTest.cc @@ -18,15 +18,17 @@ */ #include #include -#include 
"ReaderTest.h" #include "HttpHelper.h" +#include "PulsarFriend.h" #include #include #include +#include #include +#include DECLARE_LOG_OBJECT() using namespace pulsar; @@ -422,50 +424,6 @@ TEST(ReaderTest, testReaderReachEndOfTopicMessageWithoutBatches) { client.close(); } -TEST(ReaderTest, testReferenceLeak) { - Client client(serviceUrl); - - std::string topicName = "persistent://public/default/testReferenceLeak"; - - Producer producer; - ASSERT_EQ(ResultOk, client.createProducer(topicName, producer)); - - for (int i = 0; i < 10; i++) { - std::string content = "my-message-" + std::to_string(i); - Message msg = MessageBuilder().setContent(content).build(); - ASSERT_EQ(ResultOk, producer.send(msg)); - } - - ReaderConfiguration readerConf; - Reader reader; - ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::earliest(), readerConf, reader)); - - ConsumerImplBaseWeakPtr consumerPtr = ReaderTest::getConsumer(reader); - ReaderImplWeakPtr readerPtr = ReaderTest::getReaderImplWeakPtr(reader); - - LOG_INFO("1 consumer use count " << consumerPtr.use_count()); - LOG_INFO("1 reader use count " << readerPtr.use_count()); - - for (int i = 0; i < 10; i++) { - Message msg; - ASSERT_EQ(ResultOk, reader.readNext(msg)); - - std::string content = msg.getDataAsString(); - std::string expected = "my-message-" + std::to_string(i); - ASSERT_EQ(expected, content); - } - - producer.close(); - reader.close(); - // will be released after exit this method. - ASSERT_EQ(1, consumerPtr.use_count()); - ASSERT_EQ(1, readerPtr.use_count()); - client.close(); - // will be released after exit this method. 
- ASSERT_EQ(1, consumerPtr.use_count()); - ASSERT_EQ(1, readerPtr.use_count()); -} - TEST(ReaderTest, testPartitionIndex) { Client client(serviceUrl); @@ -518,7 +476,7 @@ TEST(ReaderTest, testSubscriptionNameSetting) { Reader reader; ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::earliest(), readerConf, reader)); - ASSERT_EQ(subName, ReaderTest::getConsumer(reader)->getSubscriptionName()); + ASSERT_EQ(subName, PulsarFriend::getConsumer(reader)->getSubscriptionName()); reader.close(); client.close(); @@ -536,7 +494,7 @@ TEST(ReaderTest, testSetSubscriptionNameAndPrefix) { Reader reader; ASSERT_EQ(ResultOk, client.createReader(topicName, MessageId::earliest(), readerConf, reader)); - ASSERT_EQ(subName, ReaderTest::getConsumer(reader)->getSubscriptionName()); + ASSERT_EQ(subName, PulsarFriend::getConsumer(reader)->getSubscriptionName()); reader.close(); client.close(); @@ -577,3 +535,47 @@ TEST(ReaderTest, testIsConnected) { ASSERT_EQ(ResultOk, reader.close()); ASSERT_FALSE(reader.isConnected()); } + +TEST(ReaderTest, testHasMessageAvailableWhenCreated) { + const std::string topic = "testHasMessageAvailableWhenCreated-" + std::to_string(time(nullptr)); + Client client(serviceUrl); + + ProducerConfiguration producerConf; + producerConf.setBatchingMaxMessages(3); + Producer producer; + ASSERT_EQ(ResultOk, client.createProducer(topic, producerConf, producer)); + + std::vector messageIds; + constexpr int numMessages = 7; + Latch latch(numMessages); + for (int i = 0; i < numMessages; i++) { + producer.sendAsync(MessageBuilder().setContent("msg-" + std::to_string(i)).build(), + [i, &messageIds, &latch](Result result, const MessageId& messageId) { + if (result == ResultOk) { + LOG_INFO("Send " << i << " to " << messageId); + messageIds.emplace_back(messageId); + } else { + LOG_ERROR("Failed to send " << i << ": " << messageId); + } + latch.countdown(); + }); + } + latch.wait(std::chrono::seconds(3)); + ASSERT_EQ(messageIds.size(), numMessages); + + Reader 
reader; + bool hasMessageAvailable; + + for (size_t i = 0; i < messageIds.size() - 1; i++) { + ASSERT_EQ(ResultOk, client.createReader(topic, messageIds[i], {}, reader)); + ASSERT_EQ(ResultOk, reader.hasMessageAvailable(hasMessageAvailable)); + EXPECT_TRUE(hasMessageAvailable); + } + + // The start message ID is exclusive by default, so when we start at the last message, there should be no + // message available. + ASSERT_EQ(ResultOk, client.createReader(topic, messageIds.back(), {}, reader)); + ASSERT_EQ(ResultOk, reader.hasMessageAvailable(hasMessageAvailable)); + EXPECT_FALSE(hasMessageAvailable); + client.close(); +} diff --git a/pulsar-client-cpp/tests/SchemaTest.cc b/pulsar-client-cpp/tests/SchemaTest.cc index 1776460f895a2..f15365219f748 100644 --- a/pulsar-client-cpp/tests/SchemaTest.cc +++ b/pulsar-client-cpp/tests/SchemaTest.cc @@ -73,3 +73,37 @@ TEST(SchemaTest, testSchema) { client.close(); } + +TEST(SchemaTest, testHasSchemaVersion) { + Client client(lookupUrl); + std::string topic = "SchemaTest-HasSchemaVersion"; + SchemaInfo stringSchema(SchemaType::STRING, "String", ""); + + Consumer consumer; + ASSERT_EQ(ResultOk, client.subscribe(topic + "1", "sub", ConsumerConfiguration().setSchema(stringSchema), + consumer)); + Producer batchedProducer; + ASSERT_EQ(ResultOk, client.createProducer(topic + "1", ProducerConfiguration().setSchema(stringSchema), + batchedProducer)); + Producer nonBatchedProducer; + ASSERT_EQ(ResultOk, client.createProducer(topic + "1", ProducerConfiguration().setSchema(stringSchema), + nonBatchedProducer)); + + ASSERT_EQ(ResultOk, batchedProducer.send(MessageBuilder().setContent("msg-0").build())); + ASSERT_EQ(ResultOk, nonBatchedProducer.send(MessageBuilder().setContent("msg-1").build())); + + Message msgs[2]; + ASSERT_EQ(ResultOk, consumer.receive(msgs[0], 3000)); + ASSERT_EQ(ResultOk, consumer.receive(msgs[1], 3000)); + + std::string schemaVersion(8, '\0'); + ASSERT_EQ(msgs[0].getDataAsString(), "msg-0"); + 
ASSERT_TRUE(msgs[0].hasSchemaVersion()); + ASSERT_EQ(msgs[0].getSchemaVersion(), schemaVersion); + + ASSERT_EQ(msgs[1].getDataAsString(), "msg-1"); + ASSERT_TRUE(msgs[1].hasSchemaVersion()); + ASSERT_EQ(msgs[1].getSchemaVersion(), schemaVersion); + + client.close(); +} diff --git a/pulsar-client-cpp/tests/SynchronizedHashMapTest.cc b/pulsar-client-cpp/tests/SynchronizedHashMapTest.cc new file mode 100644 index 0000000000000..8d74a24014a62 --- /dev/null +++ b/pulsar-client-cpp/tests/SynchronizedHashMapTest.cc @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +#include +#include +#include +#include +#include +#include +#include "lib/Latch.h" +#include "lib/SynchronizedHashMap.h" + +using namespace pulsar; +using SyncMapType = SynchronizedHashMap; +using OptValue = typename SyncMapType::OptValue; +using PairVector = typename SyncMapType::PairVector; + +inline void sleepMs(long millis) { std::this_thread::sleep_for(std::chrono::milliseconds(millis)); } + +inline PairVector sort(PairVector pairs) { + std::sort(pairs.begin(), pairs.end(), [](const std::pair& lhs, const std::pair& rhs) { + return lhs.first < rhs.first; + }); + return pairs; +} + +TEST(SynchronizedHashMap, testClear) { + SyncMapType m({{1, 100}, {2, 200}}); + m.clear(); + ASSERT_EQ(m.toPairVector(), PairVector{}); + + PairVector expectedPairs({{3, 300}, {4, 400}}); + SyncMapType m2(expectedPairs); + PairVector pairs; + m2.clear([&pairs](const int& key, const int& value) { pairs.emplace_back(key, value); }); + ASSERT_EQ(m2.toPairVector(), PairVector{}); + ASSERT_EQ(sort(pairs), expectedPairs); +} + +TEST(SynchronizedHashMap, testRemoveAndFind) { + SyncMapType m({{1, 100}, {2, 200}, {3, 300}}); + + OptValue optValue; + optValue = m.findFirstValueIf([](const int& x) { return x == 200; }); + ASSERT_TRUE(optValue.is_present()); + ASSERT_EQ(optValue.value(), 200); + + optValue = m.findFirstValueIf([](const int& x) { return x >= 301; }); + ASSERT_FALSE(optValue.is_present()); + + optValue = m.find(1); + ASSERT_TRUE(optValue.is_present()); + ASSERT_EQ(optValue.value(), 100); + + ASSERT_FALSE(m.find(0).is_present()); + ASSERT_FALSE(m.remove(0).is_present()); + + optValue = m.remove(1); + ASSERT_TRUE(optValue.is_present()); + ASSERT_EQ(optValue.value(), 100); + + ASSERT_FALSE(m.remove(1).is_present()); + ASSERT_FALSE(m.find(1).is_present()); +} + +TEST(SynchronizedHashMapTest, testForEach) { + SyncMapType m({{1, 100}, {2, 200}, {3, 300}}); + std::vector values; + m.forEachValue([&values](const int& value) { values.emplace_back(value); }); + 
std::sort(values.begin(), values.end()); + ASSERT_EQ(values, std::vector({100, 200, 300})); + + PairVector pairs; + m.forEach([&pairs](const int& key, const int& value) { pairs.emplace_back(key, value); }); + PairVector expectedPairs({{1, 100}, {2, 200}, {3, 300}}); + ASSERT_EQ(sort(pairs), expectedPairs); +} + +TEST(SynchronizedHashMap, testRecursiveMutex) { + SyncMapType m({{1, 100}}); + OptValue optValue; + m.forEach([&m, &optValue](const int& key, const int& value) { + optValue = m.find(key); // the internal mutex was locked again + }); + ASSERT_TRUE(optValue.is_present()); + ASSERT_EQ(optValue.value(), 100); +} + +TEST(SynchronizedHashMapTest, testThreadSafeForEach) { + SyncMapType m({{1, 100}, {2, 200}, {3, 300}}); + + Latch latch(1); + std::thread t{[&m, &latch] { + latch.wait(); // this thread must start after `m.forEach` started + m.remove(2); + }}; + + std::atomic_bool firstElementDone{false}; + PairVector pairs; + m.forEach([&latch, &firstElementDone, &pairs](const int& key, const int& value) { + pairs.emplace_back(key, value); + if (!firstElementDone) { + latch.countdown(); + firstElementDone = true; + } + sleepMs(200); + }); + { + PairVector expectedPairs({{1, 100}, {2, 200}, {3, 300}}); + ASSERT_EQ(sort(pairs), expectedPairs); + } + t.join(); + { + PairVector expectedPairs({{1, 100}, {3, 300}}); + ASSERT_EQ(sort(m.toPairVector()), expectedPairs); + } +} diff --git a/pulsar-client-cpp/tests/VersionTest.cc b/pulsar-client-cpp/tests/VersionTest.cc new file mode 100644 index 0000000000000..57e1e78376200 --- /dev/null +++ b/pulsar-client-cpp/tests/VersionTest.cc @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +#include +#include + +TEST(VersionTest, testMacro) { +#ifdef PULSAR_VERSION + ASSERT_GE(PULSAR_VERSION, 2000000); + ASSERT_LE(PULSAR_VERSION, 999999999); +#else + FAIL(); +#endif +} diff --git a/pulsar-client-messagecrypto-bc/pom.xml b/pulsar-client-messagecrypto-bc/pom.xml index 45e97a7267b83..c433ef114be24 100644 --- a/pulsar-client-messagecrypto-bc/pom.xml +++ b/pulsar-client-messagecrypto-bc/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
diff --git a/pulsar-client-messagecrypto-bc/src/main/java/org/apache/pulsar/client/impl/crypto/MessageCryptoBc.java b/pulsar-client-messagecrypto-bc/src/main/java/org/apache/pulsar/client/impl/crypto/MessageCryptoBc.java index e4f3200735edb..4ce457401cd70 100644 --- a/pulsar-client-messagecrypto-bc/src/main/java/org/apache/pulsar/client/impl/crypto/MessageCryptoBc.java +++ b/pulsar-client-messagecrypto-bc/src/main/java/org/apache/pulsar/client/impl/crypto/MessageCryptoBc.java @@ -389,6 +389,7 @@ public synchronized void encrypt(Set encKeys, CryptoKeyReader keyReader, return; } + msgMetadata.clearEncryptionKeys(); // Update message metadata with encrypted data key for (String keyName : encKeys) { if (encryptedDataKeyMap.get(keyName) == null) { diff --git a/pulsar-client-shaded/pom.xml b/pulsar-client-shaded/pom.xml index a3df727fa7ed1..b9c7ac32f475e 100644 --- a/pulsar-client-shaded/pom.xml +++ b/pulsar-client-shaded/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/pulsar-client-tools-test/pom.xml b/pulsar-client-tools-test/pom.xml index efe90ceaaeada..6cafa3a8d7e33 100644 --- a/pulsar-client-tools-test/pom.xml +++ b/pulsar-client-tools-test/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
diff --git a/pulsar-client-tools-test/src/test/java/org/apache/pulsar/admin/cli/PulsarAdminToolTest.java b/pulsar-client-tools-test/src/test/java/org/apache/pulsar/admin/cli/PulsarAdminToolTest.java index 32befc8a95386..5b8b4da52cb6b 100644 --- a/pulsar-client-tools-test/src/test/java/org/apache/pulsar/admin/cli/PulsarAdminToolTest.java +++ b/pulsar-client-tools-test/src/test/java/org/apache/pulsar/admin/cli/PulsarAdminToolTest.java @@ -28,10 +28,16 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; +import static org.testng.Assert.fail; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; + +import java.io.File; import java.lang.reflect.Field; +import java.net.URL; +import java.net.URLClassLoader; import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; @@ -40,6 +46,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; +import org.apache.pulsar.admin.cli.utils.SchemaExtractor; import org.apache.pulsar.client.admin.Bookies; import org.apache.pulsar.client.admin.BrokerStats; import org.apache.pulsar.client.admin.Brokers; @@ -59,6 +66,7 @@ import org.apache.pulsar.client.admin.internal.PulsarAdminBuilderImpl; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.api.schema.SchemaDefinition; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.client.impl.MessageIdImpl; import org.apache.pulsar.client.impl.MessageImpl; @@ -92,6 +100,7 @@ import org.apache.pulsar.common.policies.data.SubscribeRate; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.apache.pulsar.common.policies.data.TopicType; +import org.apache.pulsar.common.protocol.schema.PostSchemaPayload; import org.apache.pulsar.common.util.ObjectMapperFactory; import 
org.mockito.ArgumentMatcher; import org.mockito.Mockito; @@ -368,6 +377,9 @@ public void namespaces() throws Exception { namespaces.run(split("get-subscription-types-enabled myprop/clust/ns1")); verify(mockNamespaces).getSubscriptionTypesEnabled("myprop/clust/ns1"); + namespaces.run(split("get-schema-validation-enforce myprop/clust/ns1 -ap")); + verify(mockNamespaces).getSchemaValidationEnforced("myprop/clust/ns1", true); + namespaces .run(split("set-bookie-affinity-group myprop/clust/ns1 --primary-group test1 --secondary-group test2")); verify(mockNamespaces).setBookieAffinityGroup("myprop/clust/ns1", @@ -505,6 +517,9 @@ public void namespaces() throws Exception { .topicType(TopicType.NON_PARTITIONED.toString()) .build()); + namespaces.run(split("get-auto-topic-creation myprop/clust/ns1")); + verify(mockNamespaces).getAutoTopicCreation("myprop/clust/ns1"); + namespaces.run(split("remove-auto-topic-creation myprop/clust/ns1")); verify(mockNamespaces).removeAutoTopicCreation("myprop/clust/ns1"); @@ -512,6 +527,9 @@ public void namespaces() throws Exception { verify(mockNamespaces).setAutoSubscriptionCreation("myprop/clust/ns1", AutoSubscriptionCreationOverride.builder().allowAutoSubscriptionCreation(true).build()); + namespaces.run(split("get-auto-subscription-creation myprop/clust/ns1")); + verify(mockNamespaces).getAutoSubscriptionCreation("myprop/clust/ns1"); + namespaces.run(split("remove-auto-subscription-creation myprop/clust/ns1")); verify(mockNamespaces).removeAutoSubscriptionCreation("myprop/clust/ns1"); @@ -1443,6 +1461,23 @@ public void bookies() throws Exception { .rack("rack-1") .hostname("host-1") .build()); + + // test invalid rack name "" + try { + BookieInfo.builder().rack("").hostname("host-1").build(); + fail(); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "rack name is invalid, it should not be null, empty or '/'"); + } + + // test invalid rack name "/" + try { + 
BookieInfo.builder().rack("/").hostname("host-1").build(); + fail(); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "rack name is invalid, it should not be null, empty or '/'"); + } + } @Test @@ -1588,6 +1623,63 @@ void transactions() throws Exception { verify(transactions).getPendingAckInternalStats("test", "test", false); } + @Test + void schemas() throws Exception { + PulsarAdmin admin = Mockito.mock(PulsarAdmin.class); + Schemas schemas = Mockito.mock(Schemas.class); + doReturn(schemas).when(admin).schemas(); + + CmdSchemas cmdSchemas = new CmdSchemas(() -> admin); + cmdSchemas.run(split("get -v 1 persistent://tn1/ns1/tp1")); + verify(schemas).getSchemaInfo("persistent://tn1/ns1/tp1", 1); + + cmdSchemas = new CmdSchemas(() -> admin); + cmdSchemas.run(split("get -a persistent://tn1/ns1/tp1")); + verify(schemas).getAllSchemas("persistent://tn1/ns1/tp1"); + + cmdSchemas = new CmdSchemas(() -> admin); + cmdSchemas.run(split("get persistent://tn1/ns1/tp1")); + verify(schemas).getSchemaInfoWithVersion("persistent://tn1/ns1/tp1"); + + cmdSchemas = new CmdSchemas(() -> admin); + cmdSchemas.run(split("delete persistent://tn1/ns1/tp1")); + verify(schemas).deleteSchema("persistent://tn1/ns1/tp1"); + + cmdSchemas = new CmdSchemas(() -> admin); + String schemaFile = PulsarAdminToolTest.class.getClassLoader() + .getResource("test_schema_create.json").getFile(); + cmdSchemas.run(split("upload -f " + schemaFile + " persistent://tn1/ns1/tp1")); + PostSchemaPayload input = new ObjectMapper().readValue(new File(schemaFile), PostSchemaPayload.class); + verify(schemas).createSchema("persistent://tn1/ns1/tp1", input); + + cmdSchemas = new CmdSchemas(() -> admin); + String jarFile = PulsarAdminToolTest.class.getClassLoader() + .getResource("dummyexamples.jar").getFile(); + String className = SchemaDemo.class.getName(); + cmdSchemas.run(split("extract -j " + jarFile + " -c " + className + " -t json persistent://tn1/ns1/tp1")); + File file = new File(jarFile); 
+ ClassLoader cl = new URLClassLoader(new URL[]{file.toURI().toURL()}); + Class cls = cl.loadClass(className); + SchemaDefinition schemaDefinition = + SchemaDefinition.builder() + .withPojo(cls) + .withAlwaysAllowNull(true) + .build(); + PostSchemaPayload postSchemaPayload = new PostSchemaPayload(); + postSchemaPayload.setType("JSON"); + postSchemaPayload.setSchema(SchemaExtractor.getJsonSchemaInfo(schemaDefinition)); + postSchemaPayload.setProperties(schemaDefinition.getProperties()); + verify(schemas).createSchema("persistent://tn1/ns1/tp1", postSchemaPayload); + } + + public static class SchemaDemo { + public SchemaDemo() { + } + + public static void main(String[] args) { + } + } + String[] split(String s) { return s.split(" "); } diff --git a/pulsar-client-tools-test/src/test/java/org/apache/pulsar/client/cli/PulsarClientToolTest.java b/pulsar-client-tools-test/src/test/java/org/apache/pulsar/client/cli/PulsarClientToolTest.java index d56308911ddf4..df373f3e288d8 100644 --- a/pulsar-client-tools-test/src/test/java/org/apache/pulsar/client/cli/PulsarClientToolTest.java +++ b/pulsar-client-tools-test/src/test/java/org/apache/pulsar/client/cli/PulsarClientToolTest.java @@ -33,6 +33,9 @@ import lombok.Cleanup; import org.apache.pulsar.broker.service.BrokerTestBase; import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.impl.BatchMessageIdImpl; import org.apache.pulsar.common.policies.data.TenantInfoImpl; import org.awaitility.Awaitility; import org.testng.Assert; @@ -106,7 +109,7 @@ public void testNonDurableSubscribe() throws Exception { properties.setProperty("serviceUrl", brokerUrl.toString()); properties.setProperty("useTls", "false"); - final String topicName = "persistent://prop/ns-abc/test/topic-" + UUID.randomUUID().toString(); + final String topicName = getTopicWithRandomSuffix("non-durable"); int numberOfMessages = 10; 
@Cleanup("shutdownNow") @@ -155,7 +158,7 @@ public void testDurableSubscribe() throws Exception { properties.setProperty("serviceUrl", brokerUrl.toString()); properties.setProperty("useTls", "false"); - final String topicName = "persistent://prop/ns-abc/test/topic-" + UUID.randomUUID().toString(); + final String topicName = getTopicWithRandomSuffix("durable"); int numberOfMessages = 10; @Cleanup("shutdownNow") @@ -197,7 +200,7 @@ public void testEncryption() throws Exception { properties.setProperty("serviceUrl", brokerUrl.toString()); properties.setProperty("useTls", "false"); - final String topicName = "persistent://prop/ns-abc/test/topic-" + UUID.randomUUID().toString(); + final String topicName = getTopicWithRandomSuffix("encryption"); final String keyUriBase = "file:../pulsar-broker/src/test/resources/certificate/"; final int numberOfMessages = 10; @@ -234,4 +237,63 @@ public void testEncryption() throws Exception { } } + @Test(timeOut = 20000) + public void testDisableBatching() throws Exception { + Properties properties = new Properties(); + properties.setProperty("serviceUrl", brokerUrl.toString()); + properties.setProperty("useTls", "false"); + + final String topicName = getTopicWithRandomSuffix("disable-batching"); + final int numberOfMessages = 5; + + @Cleanup + Consumer consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("sub").subscribe(); + + PulsarClientTool pulsarClientTool1 = new PulsarClientTool(properties); + String[] args1 = {"produce", "-m", "batched", "-n", Integer.toString(numberOfMessages), topicName}; + Assert.assertEquals(pulsarClientTool1.run(args1), 0); + + PulsarClientTool pulsarClientTool2 = new PulsarClientTool(properties); + String[] args2 = {"produce", "-m", "non-batched", "-n", Integer.toString(numberOfMessages), "-db", topicName}; + Assert.assertEquals(pulsarClientTool2.run(args2), 0); + + for (int i = 0; i < numberOfMessages * 2; i++) { + Message msg = consumer.receive(10, TimeUnit.SECONDS); + 
Assert.assertNotNull(msg); + if (i < numberOfMessages) { + Assert.assertEquals(new String(msg.getData()), "batched"); + Assert.assertTrue(msg.getMessageId() instanceof BatchMessageIdImpl); + } else { + Assert.assertEquals(new String(msg.getData()), "non-batched"); + Assert.assertFalse(msg.getMessageId() instanceof BatchMessageIdImpl); + } + } + } + + @Test + public void testSendMultipleMessage() throws Exception { + Properties properties = new Properties(); + properties.setProperty("serviceUrl", brokerUrl.toString()); + properties.setProperty("useTls", "false"); + + final String topicName = getTopicWithRandomSuffix("test-multiple-msg"); + + @Cleanup + Consumer consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("sub").subscribe(); + + PulsarClientTool pulsarClientTool = new PulsarClientTool(properties); + String[] args1 = {"produce", "-m", "msg0", "-m", "msg1,msg2", topicName}; + Assert.assertEquals(pulsarClientTool.run(args1), 0); + + for (int i = 0; i < 3; i++) { + Message msg = consumer.receive(10, TimeUnit.SECONDS); + Assert.assertNotNull(msg); + Assert.assertEquals(new String(msg.getData()), "msg" + i); + } + } + + private static String getTopicWithRandomSuffix(String localNameBase) { + return String.format("persistent://prop/ns-abc/test/%s-%s", localNameBase, UUID.randomUUID().toString()); + } + } diff --git a/pulsar-client-tools-test/src/test/resources/test_schema_create.json b/pulsar-client-tools-test/src/test/resources/test_schema_create.json new file mode 100644 index 0000000000000..241a985c073f7 --- /dev/null +++ b/pulsar-client-tools-test/src/test/resources/test_schema_create.json @@ -0,0 +1,4 @@ +{ + "type":"json", + "schema":"" +} \ No newline at end of file diff --git a/pulsar-client-tools/pom.xml b/pulsar-client-tools/pom.xml index 4fc38f1336adf..d74731449db7b 100644 --- a/pulsar-client-tools/pom.xml +++ b/pulsar-client-tools/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
@@ -72,6 +72,10 @@ org.asynchttpclient async-http-client + + com.typesafe.netty + netty-reactive-streams + org.apache.commons commons-lang3 diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CliCommand.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CliCommand.java index 2f1cf72db813a..d91a880b3926d 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CliCommand.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CliCommand.java @@ -22,6 +22,7 @@ import java.util.Map; import java.util.Set; +import com.google.common.collect.Sets; import org.apache.pulsar.client.admin.PulsarAdminException; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.impl.MessageIdImpl; diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdBookies.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdBookies.java index e6551b9eb5230..38fd8473f7aba 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdBookies.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdBookies.java @@ -25,6 +25,8 @@ import com.beust.jcommander.Parameters; import java.util.function.Supplier; +import com.google.common.base.Strings; +import lombok.NonNull; @Parameters(commandDescription = "Operations about bookies rack placement") public class CmdBookies extends CmdBase { @@ -41,7 +43,7 @@ void run() throws Exception { @Parameters(commandDescription = "Gets the rack placement information for a specific bookie in the cluster") private class GetBookie extends CliCommand { - @Parameter(names = { "-b", "--bookie" }, description = "bookie address", required = true) + @Parameter(names = { "-b", "--bookie" }, description = "Bookie address (format: `address:port`)", required = true) private String bookieAddress; @Override @@ -62,7 +64,7 @@ void run() throws Exception { @Parameters(commandDescription = "Remove rack placement 
information for a specific bookie in the cluster") private class RemoveBookie extends CliCommand { - @Parameter(names = { "-b", "--bookie" }, description = "bookie address", required = true) + @Parameter(names = { "-b", "--bookie" }, description = "Bookie address (format: `address:port`)", required = true) private String bookieAddress; @Override @@ -73,6 +75,8 @@ void run() throws Exception { @Parameters(commandDescription = "Updates the rack placement information for a specific bookie in the cluster (note. bookie address format:`address:port`)") private class UpdateBookie extends CliCommand { + private static final String PATH_SEPARATOR = "/"; + @Parameter(names = { "-g", "--group" }, description = "Bookie group name", required = false) private String group = "default"; @@ -87,12 +91,21 @@ private class UpdateBookie extends CliCommand { @Override void run() throws Exception { + checkArgument(!Strings.isNullOrEmpty(bookieRack) && !bookieRack.trim().equals(PATH_SEPARATOR), + "rack name is invalid, it should not be null, empty or '/'"); + getAdmin().bookies().updateBookieRackInfo(bookieAddress, group, BookieInfo.builder() .rack(bookieRack) .hostname(bookieHost) .build()); } + + private void checkArgument(boolean expression, @NonNull Object errorMessage) { + if (!expression) { + throw new IllegalArgumentException(String.valueOf(errorMessage)); + } + } } public CmdBookies(Supplier admin) { diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdClusters.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdClusters.java index eb1ca4f13607e..d43c849647235 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdClusters.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdClusters.java @@ -76,11 +76,6 @@ protected void validateClusterData(ClusterData clusterData) { "You must specify tls-trust-store-type, tls-trust-store and tls-trust-store-pwd" + " when enable tls-enable-keystore"); } - 
} else { - if (StringUtils.isBlank(clusterData.getBrokerClientTrustCertsFilePath())) { - throw new RuntimeException("You must specify tls-trust-certs-filepath" - + " when tls-enable-keystore is not enable"); - } } } } diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdFunctions.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdFunctions.java index 0664ca8fcfc6e..90ebf689605a2 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdFunctions.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdFunctions.java @@ -37,6 +37,7 @@ import java.lang.reflect.Field; import java.lang.reflect.Type; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -319,6 +320,8 @@ abstract class FunctionDetailsCommand extends BaseCommand { protected Integer maxMessageRetries; @Parameter(names = "--custom-runtime-options", description = "A string that encodes options to customize the runtime, see docs for configured runtime for details") protected String customRuntimeOptions; + @Parameter(names = "--secrets", description = "The map of secretName to an object that encapsulates how the secret is fetched by the underlying secrets provider") + protected String secretsString; @Parameter(names = "--dead-letter-topic", description = "The topic where messages that are not processed successfully are sent to") protected String deadLetterTopic; protected FunctionConfig functionConfig; @@ -520,6 +523,15 @@ void processArguments() throws Exception { functionConfig.setCustomRuntimeOptions(customRuntimeOptions); } + if (secretsString != null) { + Type type = new TypeToken>() {}.getType(); + Map secretsMap = new Gson().fromJson(secretsString, type); + if (secretsMap == null) { + secretsMap = Collections.emptyMap(); + } + functionConfig.setSecrets(secretsMap); + } + // window configs WindowConfig windowConfig = 
functionConfig.getWindowConfig(); if (null != windowLengthCount) { @@ -699,13 +711,13 @@ private void mergeArgs() { if (isBlank(clientAuthParams) && !isBlank(DEPRECATED_clientAuthParams)) { clientAuthParams = DEPRECATED_clientAuthParams; } - if (useTls == false && DEPRECATED_useTls != null) { + if (!useTls && DEPRECATED_useTls != null) { useTls = DEPRECATED_useTls; } - if (tlsAllowInsecureConnection == false && DEPRECATED_tlsAllowInsecureConnection != null) { + if (!tlsAllowInsecureConnection && DEPRECATED_tlsAllowInsecureConnection != null) { tlsAllowInsecureConnection = DEPRECATED_tlsAllowInsecureConnection; } - if (tlsHostNameVerificationEnabled == false && DEPRECATED_tlsHostNameVerificationEnabled != null) { + if (!tlsHostNameVerificationEnabled && DEPRECATED_tlsHostNameVerificationEnabled != null) { tlsHostNameVerificationEnabled = DEPRECATED_tlsHostNameVerificationEnabled; } if (isBlank(tlsTrustCertFilePath) && !isBlank(DEPRECATED_tlsTrustCertFilePath)) { diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdNamespaces.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdNamespaces.java index 435b5d8b45ddb..f9f94c379e9ce 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdNamespaces.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdNamespaces.java @@ -235,6 +235,18 @@ void run() throws PulsarAdminException { } } + @Parameters(commandDescription = "Get permissions to access subscription admin-api") + private class SubscriptionPermissions extends CliCommand { + @Parameter(description = "tenant/namespace", required = true) + private java.util.List params; + + @Override + void run() throws PulsarAdminException { + String namespace = validateNamespace(params); + print(getAdmin().namespaces().getPermissionOnSubscription(namespace)); + } + } + @Parameters(commandDescription = "Grant permissions to access subscription admin-api") private class GrantSubscriptionPermissions 
extends CliCommand { @Parameter(description = "tenant/namespace", required = true) @@ -588,6 +600,18 @@ void run() throws PulsarAdminException { } } + @Parameters(commandDescription = "Get autoTopicCreation info for a namespace") + private class GetAutoTopicCreation extends CliCommand { + @Parameter(description = "tenant/namespace", required = true) + private java.util.List params; + + @Override + void run() throws PulsarAdminException { + String namespace = validateNamespace(params); + print(getAdmin().namespaces().getAutoTopicCreation(namespace)); + } + } + @Parameters(commandDescription = "Remove override of autoTopicCreation for a namespace") private class RemoveAutoTopicCreation extends CliCommand { @Parameter(description = "tenant/namespace", required = true) @@ -619,6 +643,18 @@ void run() throws PulsarAdminException { } } + @Parameters(commandDescription = "Get the autoSubscriptionCreation for a namespace") + private class GetAutoSubscriptionCreation extends CliCommand { + @Parameter(description = "tenant/namespace", required = true) + private java.util.List params; + + @Override + void run() throws PulsarAdminException { + String namespace = validateNamespace(params); + print(getAdmin().namespaces().getAutoSubscriptionCreation(namespace)); + } + } + @Parameters(commandDescription = "Remove override of autoSubscriptionCreation for a namespace") private class RemoveAutoSubscriptionCreation extends CliCommand { @Parameter(description = "tenant/namespace", required = true) @@ -661,7 +697,12 @@ private class SetRetention extends CliCommand { void run() throws PulsarAdminException { String namespace = validateNamespace(params); long sizeLimit = validateSizeString(limitStr); - long retentionTimeInSec = RelativeTimeUtil.parseRelativeTimeInSeconds(retentionTimeStr); + long retentionTimeInSec; + try { + retentionTimeInSec = RelativeTimeUtil.parseRelativeTimeInSeconds(retentionTimeStr); + } catch (IllegalArgumentException exception) { + throw new 
ParameterException(exception.getMessage()); + } final int retentionTimeInMin; if (retentionTimeInSec != -1) { @@ -1274,6 +1315,18 @@ void run() throws PulsarAdminException { } } + @Parameters(commandDescription = "Get encryption required for a namespace") + private class GetEncryptionRequired extends CliCommand { + @Parameter(description = "tenant/namespace", required = true) + private java.util.List params; + + @Override + void run() throws PulsarAdminException { + String namespace = validateNamespace(params); + print(getAdmin().namespaces().getEncryptionRequiredStatus(namespace)); + } + } + @Parameters(commandDescription = "Get the delayed delivery policy for a namespace") private class GetDelayedDelivery extends CliCommand { @Parameter(description = "tenant/namespace", required = true) @@ -1344,7 +1397,13 @@ private class SetInactiveTopicPolicies extends CliCommand { @Override void run() throws PulsarAdminException { String namespace = validateNamespace(params); - long maxInactiveDurationInSeconds = TimeUnit.SECONDS.toSeconds(RelativeTimeUtil.parseRelativeTimeInSeconds(deleteInactiveTopicsMaxInactiveDuration)); + long maxInactiveDurationInSeconds; + try { + maxInactiveDurationInSeconds = TimeUnit.SECONDS.toSeconds( + RelativeTimeUtil.parseRelativeTimeInSeconds(deleteInactiveTopicsMaxInactiveDuration)); + } catch (IllegalArgumentException exception) { + throw new ParameterException(exception.getMessage()); + } if (enableDeleteWhileInactive == disableDeleteWhileInactive) { throw new ParameterException("Need to specify either enable-delete-while-inactive or disable-delete-while-inactive"); @@ -1377,7 +1436,13 @@ private class SetDelayedDelivery extends CliCommand { @Override void run() throws PulsarAdminException { String namespace = validateNamespace(params); - long delayedDeliveryTimeInMills = TimeUnit.SECONDS.toMillis(RelativeTimeUtil.parseRelativeTimeInSeconds(delayedDeliveryTimeStr)); + long delayedDeliveryTimeInMills; + try { + delayedDeliveryTimeInMills = 
TimeUnit.SECONDS.toMillis( + RelativeTimeUtil.parseRelativeTimeInSeconds(delayedDeliveryTimeStr)); + } catch (IllegalArgumentException exception) { + throw new ParameterException(exception.getMessage()); + } if (enable == disable) { throw new ParameterException("Need to specify either --enable or --disable"); @@ -1406,6 +1471,18 @@ void run() throws Exception { } } + @Parameters(commandDescription = "Get subscriptionAuthMod for a namespace") + private class GetSubscriptionAuthMode extends CliCommand { + @Parameter(description = "tenant/namespace", required = true) + private java.util.List params; + + @Override + void run() throws PulsarAdminException { + String namespace = validateNamespace(params); + print(getAdmin().namespaces().getSubscriptionAuthMode(namespace)); + } + } + @Parameters(commandDescription = "Get deduplicationSnapshotInterval for a namespace") private class GetDeduplicationSnapshotInterval extends CliCommand { @Parameter(description = "tenant/namespace", required = true) @@ -1735,7 +1812,13 @@ private class SetOffloadDeletionLag extends CliCommand { @Override void run() throws PulsarAdminException { String namespace = validateNamespace(params); - getAdmin().namespaces().setOffloadDeleteLag(namespace, RelativeTimeUtil.parseRelativeTimeInSeconds(lag), + long lagInSec; + try { + lagInSec = RelativeTimeUtil.parseRelativeTimeInSeconds(lag); + } catch (IllegalArgumentException exception) { + throw new ParameterException(exception.getMessage()); + } + getAdmin().namespaces().setOffloadDeleteLag(namespace, lagInSec, TimeUnit.SECONDS); } } @@ -1877,11 +1960,14 @@ private class GetSchemaValidationEnforced extends CliCommand { @Parameter(description = "tenant/namespace", required = true) private java.util.List params; + @Parameter(names = { "-ap", "--applied" }, description = "Get the applied policy of the namespace") + private boolean applied = false; + @Override void run() throws PulsarAdminException { String namespace = validateNamespace(params); - 
System.out.println(getAdmin().namespaces().getSchemaValidationEnforced(namespace)); + System.out.println(getAdmin().namespaces().getSchemaValidationEnforced(namespace, applied)); } } @@ -2057,7 +2143,13 @@ && maxValueCheck("ReadBufferSize", readBufferSize, Integer.MAX_VALUE)) { Long offloadAfterElapsedInMillis = OffloadPoliciesImpl.DEFAULT_OFFLOAD_DELETION_LAG_IN_MILLIS; if (StringUtils.isNotEmpty(offloadAfterElapsedStr)) { - Long offloadAfterElapsed = TimeUnit.SECONDS.toMillis(RelativeTimeUtil.parseRelativeTimeInSeconds(offloadAfterElapsedStr)); + Long offloadAfterElapsed; + try { + offloadAfterElapsed = TimeUnit.SECONDS.toMillis( + RelativeTimeUtil.parseRelativeTimeInSeconds(offloadAfterElapsedStr)); + } catch (IllegalArgumentException exception) { + throw new ParameterException(exception.getMessage()); + } if (positiveCheck("OffloadAfterElapsed", offloadAfterElapsed) && maxValueCheck("OffloadAfterElapsed", offloadAfterElapsed, Long.MAX_VALUE)) { offloadAfterElapsedInMillis = offloadAfterElapsed; @@ -2195,15 +2287,18 @@ void run() throws Exception { String namespace = validateNamespace(params); Map map = new HashMap<>(); if (properties.size() == 0) { - throw new IllegalArgumentException("Required at least one property for the namespace."); + throw new ParameterException(String.format("Required at least one property for the namespace, " + + "but found %d.", properties.size())); } for (String property : properties) { if (!property.contains("=")) { - throw new IllegalArgumentException("Invalid key value pair format."); + throw new ParameterException(String.format("Invalid key value pair '%s', " + + "valid format like 'a=a,b=b,c=c'.", property)); } else { String[] keyValue = property.split("="); if (keyValue.length != 2) { - throw new IllegalArgumentException("Invalid key value pair format."); + throw new ParameterException(String.format("Invalid key value pair '%s', " + + "valid format like 'a=a,b=b,c=c'.", property)); } map.put(keyValue[0], keyValue[1]); } @@ 
-2325,6 +2420,7 @@ public CmdNamespaces(Supplier admin) { jcommander.addCommand("grant-permission", new GrantPermissions()); jcommander.addCommand("revoke-permission", new RevokePermissions()); + jcommander.addCommand("subscription-permission", new SubscriptionPermissions()); jcommander.addCommand("grant-subscription-permission", new GrantSubscriptionPermissions()); jcommander.addCommand("revoke-subscription-permission", new RevokeSubscriptionPermissions()); @@ -2364,9 +2460,11 @@ public CmdNamespaces(Supplier admin) { jcommander.addCommand("remove-deduplication", new RemoveDeduplication()); jcommander.addCommand("set-auto-topic-creation", new SetAutoTopicCreation()); + jcommander.addCommand("get-auto-topic-creation", new GetAutoTopicCreation()); jcommander.addCommand("remove-auto-topic-creation", new RemoveAutoTopicCreation()); jcommander.addCommand("set-auto-subscription-creation", new SetAutoSubscriptionCreation()); + jcommander.addCommand("get-auto-subscription-creation", new GetAutoSubscriptionCreation()); jcommander.addCommand("remove-auto-subscription-creation", new RemoveAutoSubscriptionCreation()); jcommander.addCommand("get-retention", new GetRetention()); @@ -2406,7 +2504,9 @@ public CmdNamespaces(Supplier admin) { jcommander.addCommand("unsubscribe", new Unsubscribe()); jcommander.addCommand("set-encryption-required", new SetEncryptionRequired()); + jcommander.addCommand("get-encryption-required", new GetEncryptionRequired()); jcommander.addCommand("set-subscription-auth-mode", new SetSubscriptionAuthMode()); + jcommander.addCommand("get-subscription-auth-mode", new GetSubscriptionAuthMode()); jcommander.addCommand("set-delayed-delivery", new SetDelayedDelivery()); jcommander.addCommand("get-delayed-delivery", new GetDelayedDelivery()); diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdPersistentTopics.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdPersistentTopics.java index 
759b602cc0605..611c1b08be490 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdPersistentTopics.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdPersistentTopics.java @@ -20,6 +20,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank; import com.beust.jcommander.Parameter; +import com.beust.jcommander.ParameterException; import com.beust.jcommander.Parameters; import com.beust.jcommander.converters.CommaParameterSplitter; import com.google.gson.Gson; @@ -528,8 +529,13 @@ void run() throws PulsarAdminException { MessageId messageId = validateMessageIdString(resetMessageIdStr); getPersistentTopics().resetCursor(persistentTopic, subName, messageId); } else if (isNotBlank(resetTimeStr)) { - long resetTimeInMillis = TimeUnit.SECONDS - .toMillis(RelativeTimeUtil.parseRelativeTimeInSeconds(resetTimeStr)); + long resetTimeInMillis; + try { + resetTimeInMillis = TimeUnit.SECONDS.toMillis( + RelativeTimeUtil.parseRelativeTimeInSeconds(resetTimeStr)); + } catch (IllegalArgumentException exception) { + throw new ParameterException(exception.getMessage()); + } // now - go back time long timestamp = System.currentTimeMillis() - resetTimeInMillis; getPersistentTopics().resetCursor(persistentTopic, subName, timestamp); diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSchemas.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSchemas.java index 045f64e0f12f5..9575ec64d1822 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSchemas.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSchemas.java @@ -19,6 +19,7 @@ package org.apache.pulsar.admin.cli; import com.beust.jcommander.Parameter; +import com.beust.jcommander.ParameterException; import com.beust.jcommander.Parameters; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.File; @@ -48,16 +49,27 @@ private class GetSchema extends CliCommand { 
@Parameter(description = "persistent://tenant/namespace/topic", required = true) private java.util.List params; - @Parameter(names = { "--version" }, description = "version", required = false) + @Parameter(names = {"-v", "--version"}, description = "version", required = false) private Long version; + @Parameter(names = {"-a", "--all-version"}, description = "all version", required = false) + private boolean all = false; + @Override void run() throws Exception { String topic = validateTopicName(params); - if (version == null) { + if (version != null && all) { + throw new ParameterException("Only one or neither of --version and --all-version can be specified."); + } + if (version == null && !all) { System.out.println(getAdmin().schemas().getSchemaInfoWithVersion(topic)); - } else { + } else if (!all) { + if (version < 0) { + throw new ParameterException("Option --version must be greater than 0, but found " + version); + } System.out.println(getAdmin().schemas().getSchemaInfo(topic, version)); + } else { + print(getAdmin().schemas().getAllSchemas(topic)); } } } @@ -127,10 +139,10 @@ void run() throws Exception { .withPojo(cls) .withAlwaysAllowNull(alwaysAllowNull) .build(); - if (type.toLowerCase().equalsIgnoreCase("avro")) { + if (type.equalsIgnoreCase("avro")) { input.setType("AVRO"); input.setSchema(SchemaExtractor.getAvroSchemaInfo(schemaDefinition)); - } else if (type.toLowerCase().equalsIgnoreCase("json")){ + } else if (type.equalsIgnoreCase("json")){ input.setType("JSON"); input.setSchema(SchemaExtractor.getJsonSchemaInfo(schemaDefinition)); } diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSinks.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSinks.java index a4bf82eb3be3e..5d00627e3b418 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSinks.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSinks.java @@ -39,6 +39,7 @@ import java.lang.reflect.Field; 
import java.lang.reflect.Type; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -191,13 +192,13 @@ private void mergeArgs() { if (isBlank(clientAuthParams) && !isBlank(DEPRECATED_clientAuthParams)) { clientAuthParams = DEPRECATED_clientAuthParams; } - if (useTls == false && DEPRECATED_useTls != null) { + if (!useTls && DEPRECATED_useTls != null) { useTls = DEPRECATED_useTls; } - if (tlsAllowInsecureConnection == false && DEPRECATED_tlsAllowInsecureConnection != null) { + if (!tlsAllowInsecureConnection && DEPRECATED_tlsAllowInsecureConnection != null) { tlsAllowInsecureConnection = DEPRECATED_tlsAllowInsecureConnection; } - if (tlsHostNameVerificationEnabled == false && DEPRECATED_tlsHostNameVerificationEnabled != null) { + if (!tlsHostNameVerificationEnabled && DEPRECATED_tlsHostNameVerificationEnabled != null) { tlsHostNameVerificationEnabled = DEPRECATED_tlsHostNameVerificationEnabled; } if (isBlank(tlsTrustCertFilePath) && !isBlank(DEPRECATED_tlsTrustCertFilePath)) { @@ -359,6 +360,8 @@ abstract class SinkDetailsCommand extends BaseCommand { protected Long negativeAckRedeliveryDelayMs; @Parameter(names = "--custom-runtime-options", description = "A string that encodes options to customize the runtime, see docs for configured runtime for details") protected String customRuntimeOptions; + @Parameter(names = "--secrets", description = "The map of secretName to an object that encapsulates how the secret is fetched by the underlying secrets provider") + protected String secretsString; protected SinkConfig sinkConfig; @@ -524,6 +527,15 @@ void processArguments() throws Exception { sinkConfig.setCustomRuntimeOptions(customRuntimeOptions); } + if (secretsString != null) { + Type type = new TypeToken>() {}.getType(); + Map secretsMap = new Gson().fromJson(secretsString, type); + if (secretsMap == null) { + secretsMap = Collections.emptyMap(); + } + sinkConfig.setSecrets(secretsMap); 
+ } + // check if configs are valid validateSinkConfigs(sinkConfig); } diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSources.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSources.java index 6d836e2ce6bf2..1eedf654923a3 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSources.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdSources.java @@ -38,6 +38,7 @@ import java.io.IOException; import java.lang.reflect.Field; import java.lang.reflect.Type; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -190,13 +191,13 @@ private void mergeArgs() { if (isBlank(clientAuthParams) && !isBlank(DEPRECATED_clientAuthParams)) { clientAuthParams = DEPRECATED_clientAuthParams; } - if (useTls == false && DEPRECATED_useTls != null) { + if (!useTls && DEPRECATED_useTls != null) { useTls = DEPRECATED_useTls; } - if (tlsAllowInsecureConnection == false && DEPRECATED_tlsAllowInsecureConnection != null) { + if (!tlsAllowInsecureConnection && DEPRECATED_tlsAllowInsecureConnection != null) { tlsAllowInsecureConnection = DEPRECATED_tlsAllowInsecureConnection; } - if (tlsHostNameVerificationEnabled == false && DEPRECATED_tlsHostNameVerificationEnabled != null) { + if (!tlsHostNameVerificationEnabled && DEPRECATED_tlsHostNameVerificationEnabled != null) { tlsHostNameVerificationEnabled = DEPRECATED_tlsHostNameVerificationEnabled; } if (isBlank(tlsTrustCertFilePath) && !isBlank(DEPRECATED_tlsTrustCertFilePath)) { @@ -339,6 +340,8 @@ abstract class SourceDetailsCommand extends BaseCommand { protected String batchSourceConfigString; @Parameter(names = "--custom-runtime-options", description = "A string that encodes options to customize the runtime, see docs for configured runtime for details") protected String customRuntimeOptions; + @Parameter(names = "--secrets", description = "The map of secretName to an object that 
encapsulates how the secret is fetched by the underlying secrets provider") + protected String secretsString; protected SourceConfig sourceConfig; @@ -463,6 +466,16 @@ void processArguments() throws Exception { if (customRuntimeOptions != null) { sourceConfig.setCustomRuntimeOptions(customRuntimeOptions); } + + if (secretsString != null) { + Type type = new TypeToken>() {}.getType(); + Map secretsMap = new Gson().fromJson(secretsString, type); + if (secretsMap == null) { + secretsMap = Collections.emptyMap(); + } + sourceConfig.setSecrets(secretsMap); + } + // check if source configs are valid validateSourceConfigs(sourceConfig); } diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdTopics.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdTopics.java index fbd037a7f2984..c541cc1a5752a 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdTopics.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdTopics.java @@ -28,6 +28,8 @@ import com.google.common.collect.Lists; import com.google.gson.Gson; import com.google.gson.GsonBuilder; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; @@ -594,7 +596,12 @@ private class GetInternalInfo extends CliCommand { @Override void run() throws PulsarAdminException { String topic = validateTopicName(params); - String result = getTopics().getInternalInfo(topic); + String internalInfo = getTopics().getInternalInfo(topic); + if (internalInfo == null) { + System.out.println("Did not find any internal metadata info"); + return; + } + JsonObject result = JsonParser.parseString(internalInfo).getAsJsonObject(); Gson gson = new GsonBuilder().setPrettyPrinting().create(); System.out.println(gson.toJson(result)); } @@ -789,8 +796,13 @@ void run() throws PulsarAdminException { getTopics().resetCursor(persistentTopic, subName, 
messageId); } } else if (isNotBlank(resetTimeStr)) { - long resetTimeInMillis = TimeUnit.SECONDS - .toMillis(RelativeTimeUtil.parseRelativeTimeInSeconds(resetTimeStr)); + long resetTimeInMillis; + try { + resetTimeInMillis = TimeUnit.SECONDS.toMillis( + RelativeTimeUtil.parseRelativeTimeInSeconds(resetTimeStr)); + } catch (IllegalArgumentException exception) { + throw new ParameterException(exception.getMessage()); + } // now - go back time long timestamp = System.currentTimeMillis() - resetTimeInMillis; getTopics().resetCursor(persistentTopic, subName, timestamp); @@ -1277,7 +1289,13 @@ private class SetDelayedDelivery extends CliCommand { @Override void run() throws PulsarAdminException { String topicName = validateTopicName(params); - long delayedDeliveryTimeInMills = TimeUnit.SECONDS.toMillis(RelativeTimeUtil.parseRelativeTimeInSeconds(delayedDeliveryTimeStr)); + long delayedDeliveryTimeInMills; + try { + delayedDeliveryTimeInMills = TimeUnit.SECONDS.toMillis( + RelativeTimeUtil.parseRelativeTimeInSeconds(delayedDeliveryTimeStr)); + } catch (IllegalArgumentException exception) { + throw new ParameterException(exception.getMessage()); + } if (enable == disable) { throw new ParameterException("Need to specify either --enable or --disable"); @@ -1427,7 +1445,12 @@ private class SetRetention extends CliCommand { void run() throws PulsarAdminException { String persistentTopic = validatePersistentTopic(params); long sizeLimit = validateSizeString(limitStr); - long retentionTimeInSec = RelativeTimeUtil.parseRelativeTimeInSeconds(retentionTimeStr); + long retentionTimeInSec; + try { + retentionTimeInSec = RelativeTimeUtil.parseRelativeTimeInSeconds(retentionTimeStr); + } catch (IllegalArgumentException exception) { + throw new ParameterException(exception.getMessage()); + } final int retentionTimeInMin; if (retentionTimeInSec != -1) { @@ -2284,7 +2307,13 @@ private class SetInactiveTopicPolicies extends CliCommand { @Override void run() throws PulsarAdminException { 
String persistentTopic = validatePersistentTopic(params); - long maxInactiveDurationInSeconds = TimeUnit.SECONDS.toSeconds(RelativeTimeUtil.parseRelativeTimeInSeconds(deleteInactiveTopicsMaxInactiveDuration)); + long maxInactiveDurationInSeconds; + try { + maxInactiveDurationInSeconds = TimeUnit.SECONDS.toSeconds( + RelativeTimeUtil.parseRelativeTimeInSeconds(deleteInactiveTopicsMaxInactiveDuration)); + } catch (IllegalArgumentException exception) { + throw new ParameterException(exception.getMessage()); + } if (enableDeleteWhileInactive == disableDeleteWhileInactive) { throw new ParameterException("Need to specify either enable-delete-while-inactive or disable-delete-while-inactive"); diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdTransactions.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdTransactions.java index 7faf006066ca6..e6953817b0d26 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdTransactions.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdTransactions.java @@ -19,6 +19,7 @@ package org.apache.pulsar.admin.cli; import com.beust.jcommander.Parameter; +import com.beust.jcommander.ParameterException; import com.beust.jcommander.Parameters; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; @@ -133,8 +134,12 @@ private class GetSlowTransactions extends CliCommand { @Override void run() throws Exception { - long timeout = - TimeUnit.SECONDS.toMillis(RelativeTimeUtil.parseRelativeTimeInSeconds(timeoutStr)); + long timeout; + try { + timeout = TimeUnit.SECONDS.toMillis(RelativeTimeUtil.parseRelativeTimeInSeconds(timeoutStr)); + } catch (IllegalArgumentException exception) { + throw new ParameterException(exception.getMessage()); + } if (coordinatorId != null) { print(getAdmin().transactions().getSlowTransactionsByCoordinatorId(coordinatorId, timeout, TimeUnit.MILLISECONDS)); diff --git 
a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/PulsarAdminTool.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/PulsarAdminTool.java index 5f0cd58256815..8992e923d1d76 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/PulsarAdminTool.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/PulsarAdminTool.java @@ -37,6 +37,8 @@ import org.apache.pulsar.client.admin.PulsarAdminBuilder; import org.apache.pulsar.client.admin.internal.PulsarAdminImpl; +import static org.apache.commons.lang3.StringUtils.isBlank; + public class PulsarAdminTool { private static boolean allowSystemExit = true; @@ -156,6 +158,7 @@ public class PulsarAdminTool { commandMap.put("sink", CmdSinks.class); commandMap.put("packages", CmdPackages.class); + commandMap.put("transactions", CmdTransactions.class); } private static class PulsarAdminSupplier implements Supplier { @@ -249,6 +252,11 @@ boolean run(String[] args, Function a return false; } + if (isBlank(serviceUrl)) { + jcommander.usage(); + return false; + } + if (version) { System.out.println("Current version of pulsar admin client is: " + PulsarVersion.getVersion()); return true; @@ -285,11 +293,12 @@ boolean run(String[] args, Function a public static void main(String[] args) throws Exception { lastExitCode = 0; - String configFile = null; - if (args.length > 0) { - configFile = args[0]; - args = Arrays.copyOfRange(args, 1, args.length); + if (args.length == 0) { + System.out.println("Usage: pulsar-admin CONF_FILE_PATH [options] [command] [command options]"); + exit(0); + return; } + String configFile = args[0]; Properties properties = new Properties(); if (configFile != null) { @@ -301,6 +310,7 @@ public static void main(String[] args) throws Exception { PulsarAdminTool tool = new PulsarAdminTool(properties); int cmdPos; + args = Arrays.copyOfRange(args, 1, args.length); for (cmdPos = 0; cmdPos < args.length; cmdPos++) { if 
(tool.commandMap.containsKey(args[cmdPos])) { break; @@ -308,7 +318,7 @@ public static void main(String[] args) throws Exception { } ++cmdPos; - boolean isLocalRun = cmdPos < args.length && "localrun".equals(args[cmdPos].toLowerCase()); + boolean isLocalRun = cmdPos < args.length && "localrun".equalsIgnoreCase(args[cmdPos]); Function adminFactory; if (isLocalRun) { diff --git a/pulsar-client-tools/src/main/java/org/apache/pulsar/client/cli/CmdProduce.java b/pulsar-client-tools/src/main/java/org/apache/pulsar/client/cli/CmdProduce.java index 1ae2d38e106ed..e0fc6327a0f5e 100644 --- a/pulsar-client-tools/src/main/java/org/apache/pulsar/client/cli/CmdProduce.java +++ b/pulsar-client-tools/src/main/java/org/apache/pulsar/client/cli/CmdProduce.java @@ -34,17 +34,15 @@ import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; -import java.util.Arrays; import java.util.Base64; -import java.util.Collections; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; - +import java.util.stream.Collectors; +import java.util.stream.Stream; import org.apache.pulsar.client.api.Authentication; import org.apache.pulsar.client.api.AuthenticationDataProvider; import org.apache.pulsar.client.api.ClientBuilder; @@ -107,6 +105,9 @@ public class CmdProduce { description = "Rate (in msg/sec) at which to produce," + " value 0 means to produce messages as fast as possible.") private double publishRate = 0; + + @Parameter(names = { "-db", "--disable-batching" }, description = "Disable batch sending of messages") + private boolean disableBatching = false; @Parameter(names = { "-c", "--chunking" }, description = "Should split the message and publish in chunks if message size is larger than allowed max size") @@ -201,7 +202,8 @@ public int run() throws PulsarClientException { } if (messages.size() > 0){ 
- messages = Collections.unmodifiableList(Arrays.asList(messages.get(0).split(separator))); + messages = messages.stream().map(str -> str.split(separator)) + .flatMap(Stream::of).collect(Collectors.toList()); } if (messages.size() == 0 && messageFileNames.size() == 0) { @@ -247,6 +249,8 @@ private int publish(String topic) { if (this.chunkingAllowed) { producerBuilder.enableChunking(true); producerBuilder.enableBatching(false); + } else if (this.disableBatching) { + producerBuilder.enableBatching(false); } if (isNotBlank(this.encKeyName) && isNotBlank(this.encKeyValue)) { producerBuilder.addEncryptionKey(this.encKeyName); diff --git a/pulsar-client-tools/src/test/java/org/apache/pulsar/admin/cli/TestCmdClusters.java b/pulsar-client-tools/src/test/java/org/apache/pulsar/admin/cli/TestCmdClusters.java index 0ab093d56c26b..cd4c22f8b2473 100644 --- a/pulsar-client-tools/src/test/java/org/apache/pulsar/admin/cli/TestCmdClusters.java +++ b/pulsar-client-tools/src/test/java/org/apache/pulsar/admin/cli/TestCmdClusters.java @@ -62,7 +62,8 @@ public void testCmdClusterConfigFile() throws Exception { testCmdClusterConfigFile(clusterData, clusterData); } - public void testCmdClusterConfigFile(ClusterData testClusterData, ClusterData expectedClusterData) throws Exception { + private void testCmdClusterConfigFile(ClusterData testClusterData, ClusterData expectedClusterData) + throws Exception { File file = Files.createTempFile("tmp_cluster", ".yaml").toFile(); ObjectMapperFactory.getThreadLocalYaml().writeValue(file, testClusterData); Assert.assertEquals(testClusterData, CmdUtils.loadConfig(file.getAbsolutePath(), ClusterData.class)); diff --git a/pulsar-client-tools/src/test/java/org/apache/pulsar/admin/cli/TestCmdSinks.java b/pulsar-client-tools/src/test/java/org/apache/pulsar/admin/cli/TestCmdSinks.java index 87f9a3fff7eda..27402bddbf0e8 100644 --- a/pulsar-client-tools/src/test/java/org/apache/pulsar/admin/cli/TestCmdSinks.java +++ 
b/pulsar-client-tools/src/test/java/org/apache/pulsar/admin/cli/TestCmdSinks.java @@ -341,7 +341,7 @@ public void testMissingConfig() throws Exception { ); } - public void testCmdSinkCliMissingArgs( + private void testCmdSinkCliMissingArgs( String tenant, String namespace, String name, @@ -492,7 +492,7 @@ public void testCmdSinkConfigFileInvalidJar() throws Exception { testCmdSinkConfigFile(testSinkConfig, expectedSinkConfig); } - public void testCmdSinkConfigFile(SinkConfig testSinkConfig, SinkConfig expectedSinkConfig) throws Exception { + private void testCmdSinkConfigFile(SinkConfig testSinkConfig, SinkConfig expectedSinkConfig) throws Exception { File file = Files.createTempFile("", "").toFile(); @@ -572,7 +572,7 @@ public void testCliOverwriteConfigFile() throws Exception { ); } - public void testMixCliAndConfigFile( + private void testMixCliAndConfigFile( String tenant, String namespace, String name, diff --git a/pulsar-client-tools/src/test/java/org/apache/pulsar/admin/cli/TestCmdSources.java b/pulsar-client-tools/src/test/java/org/apache/pulsar/admin/cli/TestCmdSources.java index 5b4d906b81b75..ff75d26534153 100644 --- a/pulsar-client-tools/src/test/java/org/apache/pulsar/admin/cli/TestCmdSources.java +++ b/pulsar-client-tools/src/test/java/org/apache/pulsar/admin/cli/TestCmdSources.java @@ -261,7 +261,7 @@ public void testMissingConfig() throws Exception { ); } - public void testCmdSourceCliMissingArgs( + private void testCmdSourceCliMissingArgs( String tenant, String namespace, String name, @@ -500,7 +500,7 @@ public void testCliOverwriteConfigFile() throws Exception { ); } - public void testMixCliAndConfigFile( + private void testMixCliAndConfigFile( String tenant, String namespace, String name, @@ -674,4 +674,4 @@ public void testParseConfigs() throws Exception { Assert.assertEquals(config.get("float_string"), "1000.0"); Assert.assertEquals(config.get("created_at"), "Mon Jul 02 00:33:15 +0000 2018"); } -} \ No newline at end of file +} diff --git 
a/pulsar-client/pom.xml b/pulsar-client/pom.xml index 65aef6af05a5f..ebac828acb1f1 100644 --- a/pulsar-client/pom.xml +++ b/pulsar-client/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. @@ -94,6 +94,11 @@ async-http-client + + com.typesafe.netty + netty-reactive-streams + + org.slf4j slf4j-api diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageAcker.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageAcker.java index 5f9e617ca39a6..f99c54d4ba3d3 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageAcker.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageAcker.java @@ -20,7 +20,7 @@ import java.util.BitSet; -class BatchMessageAcker { +public class BatchMessageAcker { private BatchMessageAcker() { this.bitSet = new BitSet(); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java index 9f81e03e5af8a..e0ab2d942ca15 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageContainerImpl.java @@ -19,13 +19,13 @@ package org.apache.pulsar.client.impl; import com.google.common.collect.Lists; - import io.netty.buffer.ByteBuf; - +import io.netty.util.ReferenceCountUtil; import java.io.IOException; import java.util.Arrays; import java.util.List; - +import lombok.Getter; +import lombok.Setter; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.impl.ProducerImpl.OpSendMsg; import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; @@ -49,7 +49,11 @@ class BatchMessageContainerImpl extends AbstractBatchMessageContainer { private MessageMetadata messageMetadata = new MessageMetadata(); // sequence id for this batch which will be 
persisted as a single entry by broker + @Getter + @Setter private long lowestSequenceId = -1L; + @Getter + @Setter private long highestSequenceId = -1L; private ByteBuf batchedMessageMetadataAndPayload; private List> messages = Lists.newArrayList(); @@ -57,6 +61,14 @@ class BatchMessageContainerImpl extends AbstractBatchMessageContainer { // keep track of callbacks for individual messages being published in a batch protected SendCallback firstCallback; + public BatchMessageContainerImpl() { + } + + public BatchMessageContainerImpl(ProducerImpl producer) { + this(); + setProducer(producer); + } + @Override public boolean add(MessageImpl msg, SendCallback callback) { @@ -66,18 +78,24 @@ public boolean add(MessageImpl msg, SendCallback callback) { } if (++numMessagesInBatch == 1) { - // some properties are common amongst the different messages in the batch, hence we just pick it up from - // the first message - messageMetadata.setSequenceId(msg.getSequenceId()); - lowestSequenceId = Commands.initBatchMessageMetadata(messageMetadata, msg.getMessageBuilder()); - this.firstCallback = callback; - batchedMessageMetadataAndPayload = PulsarByteBufAllocator.DEFAULT - .buffer(Math.min(maxBatchSize, ClientCnx.getMaxMessageSize())); - if (msg.getMessageBuilder().hasTxnidMostBits() && currentTxnidMostBits == -1) { - currentTxnidMostBits = msg.getMessageBuilder().getTxnidMostBits(); - } - if (msg.getMessageBuilder().hasTxnidLeastBits() && currentTxnidLeastBits == -1) { - currentTxnidLeastBits = msg.getMessageBuilder().getTxnidLeastBits(); + try { + // some properties are common amongst the different messages in the batch, hence we just pick it up from + // the first message + messageMetadata.setSequenceId(msg.getSequenceId()); + lowestSequenceId = Commands.initBatchMessageMetadata(messageMetadata, msg.getMessageBuilder()); + this.firstCallback = callback; + batchedMessageMetadataAndPayload = PulsarByteBufAllocator.DEFAULT + .buffer(Math.min(maxBatchSize, 
ClientCnx.getMaxMessageSize())); + if (msg.getMessageBuilder().hasTxnidMostBits() && currentTxnidMostBits == -1) { + currentTxnidMostBits = msg.getMessageBuilder().getTxnidMostBits(); + } + if (msg.getMessageBuilder().hasTxnidLeastBits() && currentTxnidLeastBits == -1) { + currentTxnidLeastBits = msg.getMessageBuilder().getTxnidLeastBits(); + } + } catch (Throwable e) { + log.error("construct first message failed, exception is ", e); + discard(new PulsarClientException(e)); + return false; } } @@ -94,7 +112,6 @@ public boolean add(MessageImpl msg, SendCallback callback) { } highestSequenceId = msg.getSequenceId(); ProducerImpl.LAST_SEQ_ID_PUSHED_UPDATER.getAndUpdate(producer, prev -> Math.max(prev, msg.getSequenceId())); - return isBatchFull(); } @@ -162,6 +179,10 @@ public void discard(Exception ex) { if (firstCallback != null) { firstCallback.sendComplete(ex); } + if (batchedMessageMetadataAndPayload != null) { + ReferenceCountUtil.safeRelease(batchedMessageMetadataAndPayload); + batchedMessageMetadataAndPayload = null; + } } catch (Throwable t) { log.warn("[{}] [{}] Got exception while completing the callback for msg {}:", topicName, producerName, lowestSequenceId, t); @@ -178,11 +199,13 @@ public boolean isMultiBatches() { public OpSendMsg createOpSendMsg() throws IOException { ByteBuf encryptedPayload = producer.encryptMessage(messageMetadata, getCompressedBatchMetadataAndPayload()); if (encryptedPayload.readableBytes() > ClientCnx.getMaxMessageSize()) { + producer.semaphoreRelease(messages.size()); discard(new PulsarClientException.InvalidMessageException( "Message size is bigger than " + ClientCnx.getMaxMessageSize() + " bytes")); return null; } messageMetadata.setNumMessagesInBatch(numMessagesInBatch); + messageMetadata.setSequenceId(lowestSequenceId); messageMetadata.setHighestSequenceId(highestSequenceId); if (currentTxnidMostBits != -1) { messageMetadata.setTxnidMostBits(currentTxnidMostBits); diff --git 
a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageIdImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageIdImpl.java index fd8ea72c3aa56..75ab3a8806b66 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageIdImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageIdImpl.java @@ -138,6 +138,10 @@ public int getBatchSize() { return acker.getBatchSize(); } + public int getOriginalBatchSize() { + return this.batchSize; + } + public MessageIdImpl prevBatchMessageId() { return new MessageIdImpl( ledgerId, entryId - 1, partitionIndex); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageKeyBasedContainer.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageKeyBasedContainer.java index 505ca75743c53..77990eeeacbf1 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageKeyBasedContainer.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BatchMessageKeyBasedContainer.java @@ -18,27 +18,12 @@ */ package org.apache.pulsar.client.impl; -import com.google.common.collect.ComparisonChain; -import com.google.common.collect.Lists; - -import io.netty.buffer.ByteBuf; -import io.netty.util.ReferenceCountUtil; - import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; import java.util.Base64; import java.util.HashMap; import java.util.List; import java.util.Map; - -import org.apache.pulsar.client.api.PulsarClientException; -import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; -import org.apache.pulsar.common.api.proto.CompressionType; -import org.apache.pulsar.common.api.proto.MessageMetadata; -import org.apache.pulsar.common.compression.CompressionCodec; -import org.apache.pulsar.common.protocol.ByteBufPair; -import org.apache.pulsar.common.protocol.Commands; +import java.util.stream.Collectors; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; @@ -53,7 +38,7 @@ */ class BatchMessageKeyBasedContainer extends AbstractBatchMessageContainer { - private Map batches = new HashMap<>(); + private final Map batches = new HashMap<>(); @Override public boolean add(MessageImpl msg, SendCallback callback) { @@ -61,29 +46,16 @@ public boolean add(MessageImpl msg, SendCallback callback) { log.debug("[{}] [{}] add message to batch, num messages in batch so far is {}", topicName, producerName, numMessagesInBatch); } - numMessagesInBatch++; - currentBatchSizeBytes += msg.getDataBuffer().readableBytes(); String key = getKey(msg); - KeyedBatch part = batches.get(key); - if (part == null) { - part = new KeyedBatch(); - part.addMsg(msg, callback); - part.compressionType = compressionType; - part.compressor = compressor; - part.maxBatchSize = maxBatchSize; - part.topicName = topicName; - part.producerName = producerName; - batches.putIfAbsent(key, part); - - if (msg.getMessageBuilder().hasTxnidMostBits() && currentTxnidMostBits == -1) { - currentTxnidMostBits = msg.getMessageBuilder().getTxnidMostBits(); - } - if (msg.getMessageBuilder().hasTxnidLeastBits() && currentTxnidLeastBits == -1) { - currentTxnidLeastBits = msg.getMessageBuilder().getTxnidLeastBits(); - } - - } else { - part.addMsg(msg, callback); + final BatchMessageContainerImpl batchMessageContainer = batches.computeIfAbsent(key, + __ -> new BatchMessageContainerImpl(producer)); + batchMessageContainer.add(msg, callback); + // The `add` method fails iff the container is empty, i.e. the `msg` is the first message to add, while `msg` + // was failed to add. In this case, `clear` method will be called and the batch container is empty and there is + // no need to update the stats. 
+ if (!batchMessageContainer.isEmpty()) { + numMessagesInBatch++; + currentBatchSizeBytes += msg.getDataBuffer().readableBytes(); } return isBatchFull(); } @@ -92,7 +64,7 @@ public boolean add(MessageImpl msg, SendCallback callback) { public void clear() { numMessagesInBatch = 0; currentBatchSizeBytes = 0; - batches = new HashMap<>(); + batches.clear(); currentTxnidMostBits = -1L; currentTxnidLeastBits = -1L; } @@ -104,13 +76,7 @@ public boolean isEmpty() { @Override public void discard(Exception ex) { - try { - // Need to protect ourselves from any exception being thrown in the future handler from the application - batches.forEach((k, v) -> v.firstCallback.sendComplete(ex)); - } catch (Throwable t) { - log.warn("[{}] [{}] Got exception while completing the callback", topicName, producerName, t); - } - batches.forEach((k, v) -> ReferenceCountUtil.safeRelease(v.batchedMessageMetadataAndPayload)); + batches.forEach((k, v) -> v.discard(ex)); clear(); } @@ -119,64 +85,45 @@ public boolean isMultiBatches() { return true; } - private ProducerImpl.OpSendMsg createOpSendMsg(KeyedBatch keyedBatch) throws IOException { - ByteBuf encryptedPayload = producer.encryptMessage(keyedBatch.messageMetadata, keyedBatch.getCompressedBatchMetadataAndPayload()); - if (encryptedPayload.readableBytes() > ClientCnx.getMaxMessageSize()) { - keyedBatch.discard(new PulsarClientException.InvalidMessageException( - "Message size is bigger than " + ClientCnx.getMaxMessageSize() + " bytes")); - return null; - } - - final int numMessagesInBatch = keyedBatch.messages.size(); - long currentBatchSizeBytes = 0; - for (MessageImpl message : keyedBatch.messages) { - currentBatchSizeBytes += message.getDataBuffer().readableBytes(); - } - keyedBatch.messageMetadata.setNumMessagesInBatch(numMessagesInBatch); - if (currentTxnidMostBits != -1) { - keyedBatch.messageMetadata.setTxnidMostBits(currentTxnidMostBits); - } - if (currentTxnidLeastBits != -1) { - 
keyedBatch.messageMetadata.setTxnidLeastBits(currentTxnidLeastBits); - } - ByteBufPair cmd = producer.sendMessage(producer.producerId, keyedBatch.sequenceId, numMessagesInBatch, - keyedBatch.messageMetadata, encryptedPayload); - - ProducerImpl.OpSendMsg op = ProducerImpl.OpSendMsg.create(keyedBatch.messages, cmd, keyedBatch.sequenceId, keyedBatch.firstCallback); - - op.setNumMessagesInBatch(numMessagesInBatch); - op.setBatchSizeByte(currentBatchSizeBytes); - return op; - } - @Override public List createOpSendMsgs() throws IOException { - List result = new ArrayList<>(); - List list = new ArrayList<>(batches.values()); - list.sort(((o1, o2) -> ComparisonChain.start() - .compare(o1.sequenceId, o2.sequenceId) - .result())); - for (KeyedBatch keyedBatch : list) { - ProducerImpl.OpSendMsg op = createOpSendMsg(keyedBatch); - if (op != null) { - result.add(op); + try { + // In key based batching, the sequence ids might not be ordered, for example, + // | key | sequence id list | + // | :-- | :--------------- | + // | A | 0, 3, 4 | + // | B | 1, 2 | + // The message order should be 1, 2, 0, 3, 4 so that a message with a sequence id <= 4 should be dropped. + // However, for a MessageMetadata with both `sequence_id` and `highest_sequence_id` fields, the broker will + // expect a strict order so that the batch of key "A" (0, 3, 4) will be dropped. + // Therefore, we should update the `sequence_id` field to the highest sequence id and remove the + // `highest_sequence_id` field to allow the weak order. 
+ batches.values().forEach(batchMessageContainer -> { + batchMessageContainer.setLowestSequenceId(batchMessageContainer.getHighestSequenceId()); + }); + return batches.values().stream().sorted((o1, o2) -> + (int) (o1.getLowestSequenceId() - o2.getLowestSequenceId()) + ).map(batchMessageContainer -> { + try { + return batchMessageContainer.createOpSendMsg(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + }).collect(Collectors.toList()); + } catch (IllegalStateException e) { + if (e.getCause() instanceof IOException) { + throw (IOException) e.getCause(); + } else { + throw e; } } - return result; } @Override public boolean hasSameSchema(MessageImpl msg) { String key = getKey(msg); - KeyedBatch part = batches.get(key); - if (part == null || part.messages.isEmpty()) { - return true; - } - if (!part.messageMetadata.hasSchemaVersion()) { - return msg.getSchemaVersion() == null; - } - return Arrays.equals(msg.getSchemaVersion(), - part.messageMetadata.getSchemaVersion()); + BatchMessageContainerImpl batchMessageContainer = batches.get(key); + return batchMessageContainer == null || batchMessageContainer.hasSameSchema(msg); } private String getKey(MessageImpl msg) { @@ -186,78 +133,6 @@ private String getKey(MessageImpl msg) { return msg.getKey(); } - private static class KeyedBatch { - private final MessageMetadata messageMetadata = new MessageMetadata(); - // sequence id for this batch which will be persisted as a single entry by broker - private long sequenceId = -1; - private ByteBuf batchedMessageMetadataAndPayload; - private List> messages = Lists.newArrayList(); - private SendCallback previousCallback = null; - private CompressionType compressionType; - private CompressionCodec compressor; - private int maxBatchSize; - private String topicName; - private String producerName; - - // keep track of callbacks for individual messages being published in a batch - private SendCallback firstCallback; - - private ByteBuf 
getCompressedBatchMetadataAndPayload() { - for (MessageImpl msg : messages) { - batchedMessageMetadataAndPayload = Commands.serializeSingleMessageInBatchWithPayload(msg.getMessageBuilder(), - msg.getDataBuffer(), batchedMessageMetadataAndPayload); - } - int uncompressedSize = batchedMessageMetadataAndPayload.readableBytes(); - ByteBuf compressedPayload = compressor.encode(batchedMessageMetadataAndPayload); - batchedMessageMetadataAndPayload.release(); - if (compressionType != CompressionType.NONE) { - messageMetadata.setCompression(compressionType); - messageMetadata.setUncompressedSize(uncompressedSize); - } - - // Update the current max batch size using the uncompressed size, which is what we need in any case to - // accumulate the batch content - maxBatchSize = Math.max(maxBatchSize, uncompressedSize); - return compressedPayload; - } - - private void addMsg(MessageImpl msg, SendCallback callback) { - if (messages.size() == 0) { - sequenceId = Commands.initBatchMessageMetadata(messageMetadata, msg.getMessageBuilder()); - batchedMessageMetadataAndPayload = PulsarByteBufAllocator.DEFAULT - .buffer(Math.min(maxBatchSize, ClientCnx.getMaxMessageSize())); - firstCallback = callback; - } - if (previousCallback != null) { - previousCallback.addCallback(msg, callback); - } - previousCallback = callback; - messages.add(msg); - } - - public void discard(Exception ex) { - try { - // Need to protect ourselves from any exception being thrown in the future handler from the application - if (firstCallback != null) { - firstCallback.sendComplete(ex); - } - } catch (Throwable t) { - log.warn("[{}] [{}] Got exception while completing the callback for msg {}:", topicName, producerName, - sequenceId, t); - } - clear(); - } - - public void clear() { - messages = Lists.newArrayList(); - firstCallback = null; - previousCallback = null; - messageMetadata.clear(); - sequenceId = -1; - batchedMessageMetadataAndPayload = null; - } - } - private static final Logger log = 
LoggerFactory.getLogger(BatchMessageKeyBasedContainer.class); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BinaryProtoLookupService.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BinaryProtoLookupService.java index eb61a6634213c..ba3281c64cc00 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BinaryProtoLookupService.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/BinaryProtoLookupService.java @@ -36,6 +36,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.SchemaSerializationException; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespace.Mode; import org.apache.pulsar.common.api.proto.CommandLookupTopicResponse; @@ -217,9 +218,12 @@ public CompletableFuture> getSchema(TopicName topicName) { @Override public CompletableFuture> getSchema(TopicName topicName, byte[] version) { - InetSocketAddress socketAddress = serviceNameResolver.resolveHost(); CompletableFuture> schemaFuture = new CompletableFuture<>(); - + if (version != null && version.length == 0) { + schemaFuture.completeExceptionally(new SchemaSerializationException("Empty schema version")); + return schemaFuture; + } + InetSocketAddress socketAddress = serviceNameResolver.resolveHost(); client.getCnxPool().getConnection(socketAddress).thenAccept(clientCnx -> { long requestId = client.newRequestId(); ByteBuf request = Commands.newGetSchema(requestId, topicName.toString(), diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java index b0895f482d50e..9dda7860e3352 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java @@ -21,16 +21,15 @@ import static 
com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; import static org.apache.pulsar.client.impl.TransactionMetaStoreHandler.getExceptionByServerError; - +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Queues; import io.netty.buffer.ByteBuf; import io.netty.channel.Channel; -import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.EventLoopGroup; import io.netty.channel.unix.Errors.NativeIoException; import io.netty.handler.codec.LengthFieldBasedFrameDecoder; -import io.netty.handler.ssl.SslHandler; import io.netty.util.concurrent.Promise; import java.net.InetSocketAddress; import java.net.SocketAddress; @@ -47,7 +46,8 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; -import javax.net.ssl.SSLSession; + +import lombok.AccessLevel; import lombok.Getter; import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.commons.lang3.tuple.Pair; @@ -57,12 +57,9 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.PulsarClientException.ConnectException; import org.apache.pulsar.client.api.PulsarClientException.TimeoutException; -import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClientException; -import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.client.impl.BinaryProtoLookupService.LookupDataResult; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; -import org.apache.pulsar.common.api.proto.CommandTcClientConnectResponse; -import org.apache.pulsar.common.tls.TlsHostnameVerifier; +import org.apache.pulsar.client.impl.schema.SchemaInfoUtil; import org.apache.pulsar.client.impl.transaction.TransactionBufferHandler; import 
org.apache.pulsar.client.util.TimedCompletableFuture; import org.apache.pulsar.common.api.AuthData; @@ -91,10 +88,10 @@ import org.apache.pulsar.common.api.proto.CommandSendError; import org.apache.pulsar.common.api.proto.CommandSendReceipt; import org.apache.pulsar.common.api.proto.CommandSuccess; +import org.apache.pulsar.common.api.proto.CommandTcClientConnectResponse; import org.apache.pulsar.common.api.proto.ServerError; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.PulsarHandler; -import org.apache.pulsar.client.impl.schema.SchemaInfoUtil; import org.apache.pulsar.common.protocol.schema.SchemaVersion; import org.apache.pulsar.common.schema.SchemaInfo; import org.apache.pulsar.common.util.FutureUtil; @@ -106,19 +103,40 @@ public class ClientCnx extends PulsarHandler { protected final Authentication authentication; - private State state; + protected State state; + @Getter private final ConcurrentLongHashMap> pendingRequests = - new ConcurrentLongHashMap<>(16, 1); + ConcurrentLongHashMap.>newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); // LookupRequests that waiting in client side. 
private final Queue>>> waitingLookupRequests; - private final ConcurrentLongHashMap> producers = new ConcurrentLongHashMap<>(16, 1); - private final ConcurrentLongHashMap> consumers = new ConcurrentLongHashMap<>(16, 1); - private final ConcurrentLongHashMap transactionMetaStoreHandlers = new ConcurrentLongHashMap<>(16, 1); + @VisibleForTesting + final ConcurrentLongHashMap> producers = + ConcurrentLongHashMap.>newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); + @VisibleForTesting + final ConcurrentLongHashMap> consumers = + ConcurrentLongHashMap.>newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); + private final ConcurrentLongHashMap transactionMetaStoreHandlers = + ConcurrentLongHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); private final CompletableFuture connectionFuture = new CompletableFuture(); private final ConcurrentLinkedQueue requestTimeoutQueue = new ConcurrentLinkedQueue<>(); + + @VisibleForTesting + @Getter(AccessLevel.PACKAGE) private final Semaphore pendingLookupRequestSemaphore; private final Semaphore maxLookupRequestSemaphore; private final EventLoopGroup eventLoopGroup; @@ -133,15 +151,12 @@ public class ClientCnx extends PulsarHandler { private final int maxNumberOfRejectedRequestPerConnection; private final int rejectedRequestResetTimeSec = 60; - private final int protocolVersion; + protected final int protocolVersion; private final long operationTimeoutMs; protected String proxyToTargetBrokerAddress = null; // Remote hostName with which client is connected protected String remoteHostName = null; - private boolean isTlsHostnameVerificationEnable; - - private static final TlsHostnameVerifier HOSTNAME_VERIFIER = new TlsHostnameVerifier(); private ScheduledFuture timeoutTask; private SocketAddress localAddress; @@ -152,7 +167,10 @@ public class ClientCnx extends PulsarHandler { protected AuthenticationDataProvider authenticationDataProvider; private TransactionBufferHandler 
transactionBufferHandler; - enum State { + @Getter + private long lastDisconnectedTimestamp; + + protected enum State { None, SentConnectFrame, Ready, Failed, Connecting } @@ -207,7 +225,6 @@ public ClientCnx(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, in this.maxNumberOfRejectedRequestPerConnection = conf.getMaxNumberOfRejectedRequestPerConnection(); this.operationTimeoutMs = conf.getOperationTimeoutMs(); this.state = State.None; - this.isTlsHostnameVerificationEnable = conf.isTlsHostnameVerificationEnable(); this.protocolVersion = protocolVersion; } @@ -217,8 +234,9 @@ public void channelActive(ChannelHandlerContext ctx) throws Exception { this.localAddress = ctx.channel().localAddress(); this.remoteAddress = ctx.channel().remoteAddress(); - this.timeoutTask = this.eventLoopGroup.scheduleAtFixedRate(() -> checkRequestTimeout(), operationTimeoutMs, - operationTimeoutMs, TimeUnit.MILLISECONDS); + this.timeoutTask = this.eventLoopGroup + .scheduleAtFixedRate(catchingAndLoggingThrowables(this::checkRequestTimeout), operationTimeoutMs, + operationTimeoutMs, TimeUnit.MILLISECONDS); if (proxyToTargetBrokerAddress == null) { if (log.isDebugEnabled()) { @@ -255,6 +273,7 @@ protected ByteBuf newConnectCommand() throws Exception { @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { super.channelInactive(ctx); + lastDisconnectedTimestamp = System.currentTimeMillis(); log.info("{} Disconnected", ctx.channel()); if (!connectionFuture.isDone()) { connectionFuture.completeExceptionally(new PulsarClientException("Connection already closed")); @@ -264,7 +283,11 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { "Disconnected from server at " + ctx.channel().remoteAddress()); // Fail out all the pending ops - pendingRequests.forEach((key, future) -> future.completeExceptionally(e)); + pendingRequests.forEach((key, future) -> { + if (pendingRequests.remove(key, future) && !future.isDone()) { + 
future.completeExceptionally(e); + } + }); waitingLookupRequests.forEach(pair -> pair.getRight().getRight().completeExceptionally(e)); // Notify all attached producers/consumers so they have a chance to reconnect @@ -272,7 +295,6 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { consumers.forEach((id, consumer) -> consumer.connectionClosed(this)); transactionMetaStoreHandlers.forEach((id, handler) -> handler.connectionClosed(this)); - pendingRequests.clear(); waitingLookupRequests.clear(); producers.clear(); @@ -307,14 +329,6 @@ public static boolean isKnownException(Throwable t) { @Override protected void handleConnected(CommandConnected connected) { - - if (isTlsHostnameVerificationEnable && remoteHostName != null && !verifyTlsHostName(remoteHostName, ctx)) { - // close the connection if host-verification failed with the broker - log.warn("[{}] Failed to verify hostname of {}", ctx.channel(), remoteHostName); - ctx.close(); - return; - } - checkArgument(state == State.SentConnectFrame || state == State.Connecting); if (connected.hasMaxMessageSize()) { if (log.isDebugEnabled()) { @@ -541,7 +555,9 @@ protected void handleProducerSuccess(CommandProducerSuccess success) { @Override protected void handleLookupResponse(CommandLookupTopicResponse lookupResult) { if (log.isDebugEnabled()) { - log.debug("Received Broker lookup response: {}", lookupResult.getResponse()); + CommandLookupTopicResponse.LookupType response = + lookupResult.hasResponse() ? 
lookupResult.getResponse() : null; + log.debug("Received Broker lookup response: {} {}", lookupResult.getRequestId(), response); } long requestId = lookupResult.getRequestId(); @@ -577,7 +593,11 @@ protected void handleLookupResponse(CommandLookupTopicResponse lookupResult) { @Override protected void handlePartitionResponse(CommandPartitionedTopicMetadataResponse lookupResult) { if (log.isDebugEnabled()) { - log.debug("Received Broker Partition response: {}", lookupResult.getPartitions()); + CommandPartitionedTopicMetadataResponse.LookupType response = + lookupResult.hasResponse() ? lookupResult.getResponse() : null; + int partitions = lookupResult.hasPartitions() ? lookupResult.getPartitions() : -1; + log.debug("Received Broker Partition response: {} {} {}", lookupResult.getRequestId(), response, + partitions); } long requestId = lookupResult.getRequestId(); @@ -674,7 +694,7 @@ protected void handleSendError(CommandSendError sendError) { producers.get(producerId).terminated(this); break; case NotAllowedError: - producers.get(producerId).recoverNotAllowedError(sequenceId); + producers.get(producerId).recoverNotAllowedError(sequenceId, sendError.getMessage()); break; default: @@ -716,7 +736,7 @@ protected void handleError(CommandError error) { protected void handleCloseProducer(CommandCloseProducer closeProducer) { log.info("[{}] Broker notification of Closed producer: {}", remoteAddress, closeProducer.getProducerId()); final long producerId = closeProducer.getProducerId(); - ProducerImpl producer = producers.get(producerId); + ProducerImpl producer = producers.remove(producerId); if (producer != null) { producer.connectionClosed(this); } else { @@ -728,7 +748,7 @@ protected void handleCloseProducer(CommandCloseProducer closeProducer) { protected void handleCloseConsumer(CommandCloseConsumer closeConsumer) { log.info("[{}] Broker notification of Closed consumer: {}", remoteAddress, closeConsumer.getConsumerId()); final long consumerId = 
closeConsumer.getConsumerId(); - ConsumerImpl consumer = consumers.get(consumerId); + ConsumerImpl consumer = consumers.remove(consumerId); if (consumer != null) { consumer.connectionClosed(this); } else { @@ -745,6 +765,12 @@ public CompletableFuture newLookup(ByteBuf request, long reque TimedCompletableFuture future = new TimedCompletableFuture<>(); if (pendingLookupRequestSemaphore.tryAcquire()) { + future.whenComplete((lookupDataResult, throwable) -> { + if (throwable instanceof ConnectException + || throwable instanceof PulsarClientException.LookupException) { + pendingLookupRequestSemaphore.release(); + } + }); addPendingLookupRequests(requestId, future); ctx.writeAndFlush(request).addListener(writeFuture -> { if (!writeFuture.isSuccess()) { @@ -766,8 +792,9 @@ public CompletableFuture newLookup(ByteBuf request, long reque log.debug("{} Failed to add lookup-request into waiting queue", requestId); } future.completeExceptionally(new PulsarClientException.TooManyRequestsException(String.format( - "Requests number out of config: There are {%s} lookup requests outstanding and {%s} requests pending.", - pendingLookupRequestSemaphore.availablePermits(), + "Requests number out of config: There are {%s} lookup requests outstanding and {%s} requests" + + " pending.", + pendingLookupRequestSemaphore.getQueueLength(), waitingLookupRequests.size()))); } } @@ -869,8 +896,7 @@ private void sendRequestAndHandleTimeout(ByteBuf requestMessage, long reques if (flush) { ctx.writeAndFlush(requestMessage).addListener(writeFuture -> { if (!writeFuture.isSuccess()) { - CompletableFuture newFuture = pendingRequests.remove(requestId); - if (newFuture != null && !newFuture.isDone()) { + if (pendingRequests.remove(requestId, future) && !future.isDone()) { log.warn("{} Failed to send {} to broker: {}", ctx.channel(), requestType.getDescription(), writeFuture.cause().getMessage()); future.completeExceptionally(writeFuture.cause()); @@ -1060,39 +1086,6 @@ private void 
incrementRejectsAndMaybeClose() { } } - /** - * verifies host name provided in x509 Certificate in tls session - * - * it matches hostname with below scenarios - * - *
-     *  1. Supports IPV4 and IPV6 host matching
-     *  2. Supports wild card matching for DNS-name
-     *  eg:
-     *     HostName                     CN           Result
-     * 1.  localhost                    localhost    PASS
-     * 2.  localhost                    local*       PASS
-     * 3.  pulsar1-broker.com           pulsar*.com  PASS
-     * 
- * - * @param ctx - * @return true if hostname is verified else return false - */ - private boolean verifyTlsHostName(String hostname, ChannelHandlerContext ctx) { - ChannelHandler sslHandler = ctx.channel().pipeline().get("tls"); - - SSLSession sslSession = null; - if (sslHandler != null) { - sslSession = ((SslHandler) sslHandler).engine().getSession(); - if (log.isDebugEnabled()) { - log.debug("Verifying HostName for {}, Cipher {}, Protocols {}", hostname, sslSession.getCipherSuite(), - sslSession.getProtocol()); - } - return HOSTNAME_VERIFIER.verify(hostname, sslSession); - } - return false; - } - void registerConsumer(final long consumerId, final ConsumerImpl consumer) { consumers.put(consumerId, consumer); } @@ -1182,6 +1175,13 @@ public void close() { } } + protected void closeWithException(Throwable e) { + if (ctx != null) { + connectionFuture.completeExceptionally(e); + ctx.close(); + } + } + private void checkRequestTimeout() { while (!requestTimeoutQueue.isEmpty()) { RequestTime request = requestTimeoutQueue.peek(); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionHandler.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionHandler.java index 8fb7ab41d8867..8cd812256ac7f 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionHandler.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionHandler.java @@ -37,7 +37,8 @@ public class ConnectionHandler { protected final Backoff backoff; private static final AtomicLongFieldUpdater EPOCH_UPDATER = AtomicLongFieldUpdater .newUpdater(ConnectionHandler.class, "epoch"); - private volatile long epoch = 0L; + // Start with -1L because it gets incremented before sending on the first connection + private volatile long epoch = -1L; protected volatile long lastConnectionClosedTimestamp = 0L; interface Connection { @@ -103,16 +104,15 @@ protected void reconnectLater(Throwable exception) { long delayMs = backoff.next(); 
log.warn("[{}] [{}] Could not get connection to broker: {} -- Will try again in {} s", state.topic, state.getHandlerName(), exception.getMessage(), delayMs / 1000.0); - state.setState(State.Connecting); - state.client.timer().newTimeout(timeout -> { - log.info("[{}] [{}] Reconnecting after connection was closed", state.topic, state.getHandlerName()); - incrementEpoch(); - grabCnx(); - }, delayMs, TimeUnit.MILLISECONDS); - } - - protected long incrementEpoch() { - return EPOCH_UPDATER.incrementAndGet(this); + if (state.changeToConnecting()) { + state.client.timer().newTimeout(timeout -> { + log.info("[{}] [{}] Reconnecting after connection was closed", state.topic, state.getHandlerName()); + grabCnx(); + }, delayMs, TimeUnit.MILLISECONDS); + } else { + log.info("[{}] [{}] Ignoring reconnection request (state: {})", + state.topic, state.getHandlerName(), state.getState()); + } } public void connectionClosed(ClientCnx cnx) { @@ -129,7 +129,6 @@ public void connectionClosed(ClientCnx cnx) { delayMs / 1000.0); state.client.timer().newTimeout(timeout -> { log.info("[{}] [{}] Reconnecting after timeout", state.topic, state.getHandlerName()); - incrementEpoch(); grabCnx(); }, delayMs, TimeUnit.MILLISECONDS); } @@ -147,11 +146,23 @@ protected void setClientCnx(ClientCnx clientCnx) { CLIENT_CNX_UPDATER.set(this, clientCnx); } + /** + * Update the {@link ClientCnx} for the class, then increment and get the epoch value. Note that the epoch value is + * currently only used by the {@link ProducerImpl}. 
+ * @param clientCnx - the new {@link ClientCnx} + * @return the epoch value to use for this pair of {@link ClientCnx} and {@link ProducerImpl} + */ + protected long switchClientCnx(ClientCnx clientCnx) { + setClientCnx(clientCnx); + return EPOCH_UPDATER.incrementAndGet(this); + } + private boolean isValidStateForReconnection() { State state = this.state.getState(); switch (state) { case Uninitialized: case Connecting: + case RegisteringSchema: case Ready: // Ok return true; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java index 9cf2da1765342..433f20b3dafb2 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConnectionPool.java @@ -18,47 +18,45 @@ */ package org.apache.pulsar.client.impl; -import static org.apache.pulsar.common.util.netty.ChannelFutures.toCompletableFuture; import static org.apache.pulsar.client.util.MathUtils.signSafeMod; - +import static org.apache.pulsar.common.util.netty.ChannelFutures.toCompletableFuture; import com.google.common.annotations.VisibleForTesting; - import io.netty.bootstrap.Bootstrap; import io.netty.channel.Channel; import io.netty.channel.ChannelException; import io.netty.channel.ChannelOption; import io.netty.channel.EventLoopGroup; -import io.netty.resolver.dns.DnsNameResolver; +import io.netty.resolver.AddressResolver; +import io.netty.resolver.dns.DnsAddressResolverGroup; import io.netty.resolver.dns.DnsNameResolverBuilder; import io.netty.util.concurrent.Future; - -import java.io.Closeable; -import java.io.IOException; -import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.URI; import java.net.URISyntaxException; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Random; +import 
java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; import java.util.function.Supplier; - +import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.PulsarClientException.InvalidServiceURL; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.netty.DnsResolverUtil; import org.apache.pulsar.common.util.netty.EventLoopUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class ConnectionPool implements Closeable { +public class ConnectionPool implements AutoCloseable { protected final ConcurrentHashMap>> pool; private final Bootstrap bootstrap; @@ -68,14 +66,22 @@ public class ConnectionPool implements Closeable { private final int maxConnectionsPerHosts; private final boolean isSniProxy; - protected final DnsNameResolver dnsResolver; + protected final AddressResolver addressResolver; + private final boolean shouldCloseDnsResolver; public ConnectionPool(ClientConfigurationData conf, EventLoopGroup eventLoopGroup) throws PulsarClientException { this(conf, eventLoopGroup, () -> new ClientCnx(conf, eventLoopGroup)); } public ConnectionPool(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, - Supplier clientCnxSupplier) throws PulsarClientException { + Supplier clientCnxSupplier) throws PulsarClientException { + this(conf, eventLoopGroup, clientCnxSupplier, Optional.empty()); + } + + public ConnectionPool(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, + Supplier clientCnxSupplier, + Optional> addressResolver) + throws PulsarClientException { this.eventLoopGroup = eventLoopGroup; this.clientConfig = conf; 
this.maxConnectionsPerHosts = conf.getConnectionsPerBroker(); @@ -99,8 +105,19 @@ public ConnectionPool(ClientConfigurationData conf, EventLoopGroup eventLoopGrou throw new PulsarClientException(e); } - this.dnsResolver = new DnsNameResolverBuilder(eventLoopGroup.next()).traceEnabled(true) - .channelType(EventLoopUtil.getDatagramChannelClass(eventLoopGroup)).build(); + this.shouldCloseDnsResolver = !addressResolver.isPresent(); + this.addressResolver = addressResolver.orElseGet(() -> createAddressResolver(conf, eventLoopGroup)); + } + + private static AddressResolver createAddressResolver(ClientConfigurationData conf, + EventLoopGroup eventLoopGroup) { + DnsNameResolverBuilder dnsNameResolverBuilder = new DnsNameResolverBuilder() + .traceEnabled(true).channelType(EventLoopUtil.getDatagramChannelClass(eventLoopGroup)); + DnsResolverUtil.applyJdkDnsCacheSettings(dnsNameResolverBuilder); + // use DnsAddressResolverGroup to create the AddressResolver since it contains a solution + // to prevent cache stampede / thundering herds problem when a DNS entry expires while the system + // is under high load + return new DnsAddressResolverGroup(dnsNameResolverBuilder).getResolver(eventLoopGroup.next()); } private static final Random random = new Random(); @@ -222,22 +239,20 @@ private CompletableFuture createConnection(InetSocketAddress logicalA } /** - * Resolve DNS asynchronously and attempt to connect to any IP address returned by DNS server + * Resolve DNS asynchronously and attempt to connect to any IP address returned by DNS server. 
*/ private CompletableFuture createConnection(InetSocketAddress unresolvedAddress) { - int port; - CompletableFuture> resolvedAddress; + CompletableFuture> resolvedAddress; try { if (isSniProxy) { URI proxyURI = new URI(clientConfig.getProxyServiceUrl()); - port = proxyURI.getPort(); - resolvedAddress = resolveName(proxyURI.getHost()); + resolvedAddress = + resolveName(InetSocketAddress.createUnresolved(proxyURI.getHost(), proxyURI.getPort())); } else { - port = unresolvedAddress.getPort(); - resolvedAddress = resolveName(unresolvedAddress.getHostString()); + resolvedAddress = resolveName(unresolvedAddress); } return resolvedAddress.thenCompose( - inetAddresses -> connectToResolvedAddresses(inetAddresses.iterator(), port, + inetAddresses -> connectToResolvedAddresses(inetAddresses.iterator(), isSniProxy ? unresolvedAddress : null)); } catch (URISyntaxException e) { log.error("Invalid Proxy url {}", clientConfig.getProxyServiceUrl(), e); @@ -247,34 +262,38 @@ private CompletableFuture createConnection(InetSocketAddress unresolved } /** - * Try to connect to a sequence of IP addresses until a successfull connection can be made, or fail if no address is - * working + * Try to connect to a sequence of IP addresses until a successful connection can be made, or fail if no + * address is working. 
*/ - private CompletableFuture connectToResolvedAddresses(Iterator unresolvedAddresses, int port, InetSocketAddress sniHost) { + private CompletableFuture connectToResolvedAddresses(Iterator unresolvedAddresses, + InetSocketAddress sniHost) { CompletableFuture future = new CompletableFuture<>(); // Successfully connected to server - connectToAddress(unresolvedAddresses.next(), port, sniHost).thenAccept(future::complete).exceptionally(exception -> { - if (unresolvedAddresses.hasNext()) { - // Try next IP address - connectToResolvedAddresses(unresolvedAddresses, port, sniHost).thenAccept(future::complete).exceptionally(ex -> { - // This is already unwinding the recursive call - future.completeExceptionally(ex); + connectToAddress(unresolvedAddresses.next(), sniHost) + .thenAccept(future::complete) + .exceptionally(exception -> { + if (unresolvedAddresses.hasNext()) { + // Try next IP address + connectToResolvedAddresses(unresolvedAddresses, sniHost).thenAccept(future::complete) + .exceptionally(ex -> { + // This is already unwinding the recursive call + future.completeExceptionally(ex); + return null; + }); + } else { + // Failed to connect to any IP address + future.completeExceptionally(exception); + } return null; }); - } else { - // Failed to connect to any IP address - future.completeExceptionally(exception); - } - return null; - }); return future; } - CompletableFuture> resolveName(String hostname) { - CompletableFuture> future = new CompletableFuture<>(); - dnsResolver.resolveAll(hostname).addListener((Future> resolveFuture) -> { + CompletableFuture> resolveName(InetSocketAddress unresolvedAddress) { + CompletableFuture> future = new CompletableFuture<>(); + addressResolver.resolveAll(unresolvedAddress).addListener((Future> resolveFuture) -> { if (resolveFuture.isSuccess()) { future.complete(resolveFuture.get()); } else { @@ -285,20 +304,18 @@ CompletableFuture> resolveName(String hostname) { } /** - * Attempt to establish a TCP connection to an already 
resolved single IP address + * Attempt to establish a TCP connection to an already resolved single IP address. */ - private CompletableFuture connectToAddress(InetAddress ipAddress, int port, InetSocketAddress sniHost) { - InetSocketAddress remoteAddress = new InetSocketAddress(ipAddress, port); + private CompletableFuture connectToAddress(InetSocketAddress remoteAddress, InetSocketAddress sniHost) { if (clientConfig.isUseTls()) { return toCompletableFuture(bootstrap.register()) .thenCompose(channel -> channelInitializerHandler .initTls(channel, sniHost != null ? sniHost : remoteAddress)) - .thenCompose(channel -> channelInitializerHandler - .initSocks5IfConfig(channel)) + .thenCompose(channelInitializerHandler::initSocks5IfConfig) .thenCompose(channel -> toCompletableFuture(channel.connect(remoteAddress))); } else { return toCompletableFuture(bootstrap.register()) - .thenCompose(channel -> channelInitializerHandler.initSocks5IfConfig(channel)) + .thenCompose(channelInitializerHandler::initSocks5IfConfig) .thenCompose(channel -> toCompletableFuture(channel.connect(remoteAddress))); } } @@ -307,7 +324,7 @@ public void releaseConnection(ClientCnx cnx) { if (maxConnectionsPerHosts == 0) { //Disable pooling if (cnx.channel().isActive()) { - if(log.isDebugEnabled()) { + if (log.isDebugEnabled()) { log.debug("close connection due to pooling disabled."); } cnx.close(); @@ -316,15 +333,11 @@ public void releaseConnection(ClientCnx cnx) { } @Override - public void close() throws IOException { - try { - if (!eventLoopGroup.isShutdown()) { - eventLoopGroup.shutdownGracefully(0, 10, TimeUnit.SECONDS).await(); - } - } catch (InterruptedException e) { - log.warn("EventLoopGroup shutdown was interrupted", e); + public void close() throws Exception { + closeAllConnections(); + if (shouldCloseDnsResolver) { + addressResolver.close(); } - dnsResolver.close(); } private void cleanupConnection(InetSocketAddress address, int connectionKey, @@ -335,6 +348,11 @@ private void 
cleanupConnection(InetSocketAddress address, int connectionKey, } } + public Set> getConnections() { + return Collections.unmodifiableSet( + pool.values().stream().flatMap(n -> n.values().stream()).collect(Collectors.toSet())); + } + @VisibleForTesting int getPoolSize() { return pool.values().stream().mapToInt(Map::size).sum(); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java index 45505e48a205e..49e9acac05643 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java @@ -32,9 +32,7 @@ import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; -import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -70,8 +68,8 @@ public abstract class ConsumerBase extends HandlerState implements Consumer listener; protected final ConsumerEventListener consumerEventListener; protected final ExecutorProvider executorProvider; - protected final ScheduledExecutorService externalPinnedExecutor; - protected final ScheduledExecutorService internalPinnedExecutor; + protected final ExecutorService externalPinnedExecutor; + protected final ExecutorService internalPinnedExecutor; final BlockingQueue> incomingMessages; protected ConcurrentOpenHashMap unAckedChunkedMessageIdSequenceMap; protected final ConcurrentLinkedQueue>> pendingReceives; @@ -85,7 +83,6 @@ public abstract class ConsumerBase extends HandlerState implements Consumer conf, int receiverQueueSize, ExecutorProvider executorProvider, @@ -101,10 +98,11 @@ protected 
ConsumerBase(PulsarClientImpl client, String topic, ConsumerConfigurat this.consumerEventListener = conf.getConsumerEventListener(); // Always use growable queue since items can exceed the advertised size this.incomingMessages = new GrowableArrayBlockingQueue<>(); - this.unAckedChunkedMessageIdSequenceMap = new ConcurrentOpenHashMap<>(); + this.unAckedChunkedMessageIdSequenceMap = + ConcurrentOpenHashMap.newBuilder().build(); this.executorProvider = executorProvider; - this.externalPinnedExecutor = (ScheduledExecutorService) executorProvider.getExecutor(); - this.internalPinnedExecutor = (ScheduledExecutorService) client.getInternalExecutorService(); + this.externalPinnedExecutor = executorProvider.getExecutor(); + this.internalPinnedExecutor = client.getInternalExecutorService(); this.pendingReceives = Queues.newConcurrentLinkedQueue(); this.pendingBatchReceives = Queues.newConcurrentLinkedQueue(); this.schema = schema; @@ -136,9 +134,12 @@ protected ConsumerBase(PulsarClientImpl client, String topic, ConsumerConfigurat } else { this.batchReceivePolicy = BatchReceivePolicy.DEFAULT_POLICY; } + } - if (batchReceivePolicy.getTimeoutMs() > 0) { - batchReceiveTimeout = client.timer().newTimeout(this::pendingBatchReceiveTask, batchReceivePolicy.getTimeoutMs(), TimeUnit.MILLISECONDS); + protected void triggerBatchReceiveTimeoutTask() { + if (!hasBatchReceiveTimeout() && batchReceivePolicy.getTimeoutMs() > 0) { + batchReceiveTimeout = client.timer().newTimeout(this::pendingBatchReceiveTask, + batchReceivePolicy.getTimeoutMs(), TimeUnit.MILLISECONDS); } } @@ -211,13 +212,10 @@ protected boolean hasNextPendingReceive() { protected CompletableFuture> nextPendingReceive() { CompletableFuture> receivedFuture; - while (true) { + do { receivedFuture = pendingReceives.poll(); // skip done futures (cancelling a future could mark it done) - if (receivedFuture == null || !receivedFuture.isDone()) { - break; - } - } + } while (receivedFuture != null && receivedFuture.isDone()); 
return receivedFuture; } @@ -490,6 +488,10 @@ public CompletableFuture acknowledgeAsync(MessageId messageId, if (null != txn) { checkArgument(txn instanceof TransactionImpl); txnImpl = (TransactionImpl) txn; + CompletableFuture completableFuture = new CompletableFuture<>(); + if (!txnImpl.checkIfOpen(completableFuture)) { + return completableFuture; + } } return doAcknowledgeWithTxn(messageId, AckType.Individual, Collections.emptyMap(), txnImpl); } @@ -717,13 +719,13 @@ protected void onNegativeAcksSend(Set messageIds) { protected void onAckTimeoutSend(Set messageIds) { if (interceptors != null) { - interceptors. onAckTimeoutSend(this, messageIds); + interceptors.onAckTimeoutSend(this, messageIds); } } protected void onPartitionsChange(String topicName, int partitions) { if (interceptors != null) { - interceptors. onPartitionsChange(topicName, partitions); + interceptors.onPartitionsChange(topicName, partitions); } } @@ -866,7 +868,7 @@ private void doPendingBatchReceiveTask(Timeout timeout) { } long timeToWaitMs; - + boolean hasPendingReceives = false; synchronized (this) { // If it's closing/closed we need to ignore this timeout and not schedule next timeout. 
if (getState() == State.Closing || getState() == State.Closed) { @@ -903,43 +905,58 @@ private void doPendingBatchReceiveTask(Timeout timeout) { } else { // The diff is greater than zero, set the timeout to the diff value timeToWaitMs = diff; + hasPendingReceives = true; break; } opBatchReceive = pendingBatchReceives.peek(); } - batchReceiveTimeout = client.timer().newTimeout(this::pendingBatchReceiveTask, - timeToWaitMs, TimeUnit.MILLISECONDS); + if (hasPendingReceives) { + batchReceiveTimeout = client.timer().newTimeout(this::pendingBatchReceiveTask, + timeToWaitMs, TimeUnit.MILLISECONDS); + } else { + batchReceiveTimeout = null; + } } } - protected void triggerListener() { - // Trigger the notification on the message listener in a separate thread to avoid blocking the networking - // thread while the message processing happens - try { - // Control executor to call MessageListener one by one. - if (executorQueueSize.get() < 1) { - final Message msg = internalReceive(0, TimeUnit.MILLISECONDS); - if (msg != null) { - executorQueueSize.incrementAndGet(); - if (SubscriptionType.Key_Shared == conf.getSubscriptionType()) { - executorProvider.getExecutor(peekMessageKey(msg)).execute(() -> - callMessageListener(msg)); + protected void tryTriggerListener() { + if (listener != null) { + triggerListener(); + } + } + + private void triggerListener() { + // The messages are added into the receiver queue by the internal pinned executor, + // so need to use internal pinned executor to avoid race condition which message + // might be added into the receiver queue but not able to read here. 
+ internalPinnedExecutor.execute(() -> { + try { + Message msg; + do { + msg = internalReceive(0, TimeUnit.MILLISECONDS); + if (msg != null) { + // Trigger the notification on the message listener in a separate thread to avoid blocking the + // internal pinned executor thread while the message processing happens + final Message finalMsg = msg; + if (SubscriptionType.Key_Shared == conf.getSubscriptionType()) { + executorProvider.getExecutor(peekMessageKey(msg)).execute(() -> + callMessageListener(finalMsg)); + } else { + getExternalExecutor(msg).execute(() -> { + callMessageListener(finalMsg); + }); + } } else { - getExternalExecutor(msg).execute(() -> { - callMessageListener(msg); - }); + if (log.isDebugEnabled()) { + log.debug("[{}] [{}] Message has been cleared from the queue", topic, subscription); + } } - } + } while (msg != null); + } catch (PulsarClientException e) { + log.warn("[{}] [{}] Failed to dequeue the message for listener", topic, subscription, e); } - } catch (PulsarClientException e) { - log.warn("[{}] [{}] Failed to dequeue the message for listener", topic, subscription, e); - return; - } - - if (log.isDebugEnabled()) { - log.debug("[{}] [{}] Message has been cleared from the queue", topic, subscription); - } + }); } protected void callMessageListener(Message msg) { @@ -948,13 +965,16 @@ protected void callMessageListener(Message msg) { log.debug("[{}][{}] Calling message listener for message {}", topic, subscription, msg.getMessageId()); } + ConsumerImpl receivedConsumer = (msg instanceof TopicMessageImpl) + ? ((TopicMessageImpl) msg).receivedByconsumer : (ConsumerImpl) this; + // Increase the permits here since we will not increase permits while receive messages from consumer + // after enabled message listener. + receivedConsumer.increaseAvailablePermits((MessageImpl) (msg instanceof TopicMessageImpl + ? 
((TopicMessageImpl) msg).getMessage() : msg)); listener.received(ConsumerBase.this, msg); } catch (Throwable t) { log.error("[{}][{}] Message listener error in processing message: {}", topic, subscription, msg.getMessageId(), t); - } finally { - executorQueueSize.decrementAndGet(); - triggerListener(); } } @@ -1022,5 +1042,9 @@ private ExecutorService getInternalExecutor(Message msg) { return executor; } + public boolean hasBatchReceiveTimeout() { + return batchReceiveTimeout != null; + } + private static final Logger log = LoggerFactory.getLogger(ConsumerBase.class); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBuilderImpl.java index cbfc27d2b0381..a3291db70950d 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBuilderImpl.java @@ -24,9 +24,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.regex.Pattern; import java.util.stream.Collectors; import lombok.AccessLevel; @@ -56,6 +54,7 @@ import org.apache.pulsar.client.impl.conf.ConsumerConfigurationData; import org.apache.pulsar.client.util.RetryMessageUtil; import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.partition.PartitionedTopicMetadata; import org.apache.pulsar.common.util.FutureUtil; @Getter(AccessLevel.PUBLIC) @@ -117,48 +116,63 @@ public CompletableFuture> subscribeAsync() { return FutureUtil.failedFuture( new InvalidConfigurationException("KeySharedPolicy must set with KeyShared subscription")); } - if(conf.isRetryEnable() && conf.getTopicNames().size() > 0 ) { + CompletableFuture applyDLQConfig; + if (conf.isRetryEnable() && 
conf.getTopicNames().size() > 0) { TopicName topicFirst = TopicName.get(conf.getTopicNames().iterator().next()); - String retryLetterTopic = topicFirst + "-" + conf.getSubscriptionName() + RetryMessageUtil.RETRY_GROUP_TOPIC_SUFFIX; - String deadLetterTopic = topicFirst + "-" + conf.getSubscriptionName() + RetryMessageUtil.DLQ_GROUP_TOPIC_SUFFIX; - //Issue 9327: do compatibility check in case of the default retry and dead letter topic name changed - String oldRetryLetterTopic = topicFirst.getNamespace() + "/" + conf.getSubscriptionName() + RetryMessageUtil.RETRY_GROUP_TOPIC_SUFFIX; - String oldDeadLetterTopic = topicFirst.getNamespace() + "/" + conf.getSubscriptionName() + RetryMessageUtil.DLQ_GROUP_TOPIC_SUFFIX; - try { - if (client.getPartitionedTopicMetadata(oldRetryLetterTopic) - .get(client.conf.getOperationTimeoutMs(), TimeUnit.MILLISECONDS).partitions > 0) { - retryLetterTopic = oldRetryLetterTopic; - } - if (client.getPartitionedTopicMetadata(oldDeadLetterTopic) - .get(client.conf.getOperationTimeoutMs(), TimeUnit.MILLISECONDS).partitions > 0) { - deadLetterTopic = oldDeadLetterTopic; - } - } catch (InterruptedException | TimeoutException e) { - return FutureUtil.failedFuture(e); - } catch (ExecutionException e) { - return FutureUtil.failedFuture(e.getCause()); - } - - if(conf.getDeadLetterPolicy() == null) { - conf.setDeadLetterPolicy(DeadLetterPolicy.builder() + String oldRetryLetterTopic = topicFirst.getNamespace() + "/" + conf.getSubscriptionName() + + RetryMessageUtil.RETRY_GROUP_TOPIC_SUFFIX; + String oldDeadLetterTopic = topicFirst.getNamespace() + "/" + conf.getSubscriptionName() + + RetryMessageUtil.DLQ_GROUP_TOPIC_SUFFIX; + DeadLetterPolicy deadLetterPolicy = conf.getDeadLetterPolicy(); + if (deadLetterPolicy == null || StringUtils.isBlank(deadLetterPolicy.getRetryLetterTopic()) + || StringUtils.isBlank(deadLetterPolicy.getDeadLetterTopic())) { + CompletableFuture retryLetterTopicMetadata = + client.getPartitionedTopicMetadata(oldRetryLetterTopic); 
+ CompletableFuture deadLetterTopicMetadata = + client.getPartitionedTopicMetadata(oldDeadLetterTopic); + applyDLQConfig = CompletableFuture.allOf(retryLetterTopicMetadata, deadLetterTopicMetadata) + .thenAccept(__ -> { + String retryLetterTopic = topicFirst + "-" + conf.getSubscriptionName() + + RetryMessageUtil.RETRY_GROUP_TOPIC_SUFFIX; + String deadLetterTopic = topicFirst + "-" + conf.getSubscriptionName() + + RetryMessageUtil.DLQ_GROUP_TOPIC_SUFFIX; + if (retryLetterTopicMetadata.join().partitions > 0) { + retryLetterTopic = oldRetryLetterTopic; + } + if (deadLetterTopicMetadata.join().partitions > 0) { + deadLetterTopic = oldDeadLetterTopic; + } + if (deadLetterPolicy == null) { + conf.setDeadLetterPolicy(DeadLetterPolicy.builder() .maxRedeliverCount(RetryMessageUtil.MAX_RECONSUMETIMES) .retryLetterTopic(retryLetterTopic) .deadLetterTopic(deadLetterTopic) .build()); + } else { + if (StringUtils.isBlank(deadLetterPolicy.getRetryLetterTopic())) { + conf.getDeadLetterPolicy().setRetryLetterTopic(retryLetterTopic); + } + if (StringUtils.isBlank(deadLetterPolicy.getDeadLetterTopic())) { + conf.getDeadLetterPolicy().setDeadLetterTopic(deadLetterTopic); + } + } + conf.getTopicNames().add(conf.getDeadLetterPolicy().getRetryLetterTopic()); + }); } else { - if (StringUtils.isBlank(conf.getDeadLetterPolicy().getRetryLetterTopic())) { - conf.getDeadLetterPolicy().setRetryLetterTopic(retryLetterTopic); - } - if (StringUtils.isBlank(conf.getDeadLetterPolicy().getDeadLetterTopic())) { - conf.getDeadLetterPolicy().setDeadLetterTopic(deadLetterTopic); - } + conf.getTopicNames().add(conf.getDeadLetterPolicy().getRetryLetterTopic()); + applyDLQConfig = CompletableFuture.completedFuture(null); } - conf.getTopicNames().add(conf.getDeadLetterPolicy().getRetryLetterTopic()); + } else { + applyDLQConfig = CompletableFuture.completedFuture(null); } - return interceptorList == null || interceptorList.size() == 0 ? 
- client.subscribeAsync(conf, schema, null) : - client.subscribeAsync(conf, schema, new ConsumerInterceptors<>(interceptorList)); + return applyDLQConfig.thenCompose(__ -> { + if (interceptorList == null || interceptorList.size() == 0) { + return client.subscribeAsync(conf, schema, null); + } else { + return client.subscribeAsync(conf, schema, new ConsumerInterceptors<>(interceptorList)); + } + }); } @Override @@ -332,7 +346,7 @@ public ConsumerBuilder autoAckOldestChunkedMessageOnQueueFull(boolean autoAck conf.setAutoAckOldestChunkedMessageOnQueueFull(autoAckOldestChunkedMessageOnQueueFull); return this; } - + @Override public ConsumerBuilder property(String key, String value) { checkArgument(StringUtils.isNotBlank(key) && StringUtils.isNotBlank(value), @@ -414,8 +428,9 @@ public ConsumerBuilder deadLetterPolicy(DeadLetterPolicy deadLetterPolicy) { if (conf.getAckTimeoutMillis() == 0) { conf.setAckTimeoutMillis(DEFAULT_ACK_TIMEOUT_MILLIS_FOR_DEAD_LETTER); } - conf.setDeadLetterPolicy(deadLetterPolicy); + checkArgument(deadLetterPolicy.getMaxRedeliverCount() > 0, "MaxRedeliverCount must be > 0."); } + conf.setDeadLetterPolicy(deadLetterPolicy); return this; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java index b29d6c73deff6..89e434d41d459 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java @@ -20,12 +20,12 @@ import static com.google.common.base.Preconditions.checkArgument; import static org.apache.pulsar.common.protocol.Commands.hasChecksum; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.collect.ComparisonChain; import com.google.common.collect.Iterables; import com.scurrilous.circe.checksum.Crc32cIntChecksum; import io.netty.buffer.ByteBuf; -import 
io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; import io.netty.util.Recycler; import io.netty.util.Recycler.Handle; @@ -48,6 +48,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -137,6 +138,7 @@ public class ConsumerImpl extends ConsumerBase implements ConnectionHandle private final int partitionIndex; private final boolean hasParentConsumer; + private final boolean parentConsumerHasListener; private final int receiverQueueRefillThreshold; @@ -182,7 +184,8 @@ public class ConsumerImpl extends ConsumerBase implements ConnectionHandle protected volatile boolean paused; - protected ConcurrentOpenHashMap chunkedMessagesMap = new ConcurrentOpenHashMap<>(); + protected ConcurrentOpenHashMap chunkedMessagesMap = + ConcurrentOpenHashMap.newBuilder().build(); private int pendingChunkedMessageCount = 0; protected long expireTimeOfIncompleteChunkedMessageMillis = 0; private boolean expireChunkMessageTaskScheduled = false; @@ -210,8 +213,8 @@ static ConsumerImpl newConsumerImpl(PulsarClientImpl client, Schema schema, ConsumerInterceptors interceptors, boolean createTopicIfDoesNotExist) { - return newConsumerImpl(client, topic, conf, executorProvider, partitionIndex, hasParentConsumer, subscribeFuture, - startMessageId, schema, interceptors, createTopicIfDoesNotExist, 0); + return newConsumerImpl(client, topic, conf, executorProvider, partitionIndex, hasParentConsumer, false, + subscribeFuture, startMessageId, schema, interceptors, createTopicIfDoesNotExist, 0); } static ConsumerImpl newConsumerImpl(PulsarClientImpl client, @@ -220,6 +223,7 @@ static ConsumerImpl newConsumerImpl(PulsarClientImpl client, ExecutorProvider executorProvider, int partitionIndex, 
boolean hasParentConsumer, + boolean parentConsumerHasListener, CompletableFuture> subscribeFuture, MessageId startMessageId, Schema schema, @@ -233,14 +237,16 @@ static ConsumerImpl newConsumerImpl(PulsarClientImpl client, createTopicIfDoesNotExist); } else { return new ConsumerImpl<>(client, topic, conf, executorProvider, partitionIndex, hasParentConsumer, - subscribeFuture, startMessageId, startMessageRollbackDurationInSec /* rollback time in sec to start msgId */, + parentConsumerHasListener, + subscribeFuture, startMessageId, + startMessageRollbackDurationInSec /* rollback time in sec to start msgId */, schema, interceptors, createTopicIfDoesNotExist); } } protected ConsumerImpl(PulsarClientImpl client, String topic, ConsumerConfigurationData conf, ExecutorProvider executorProvider, int partitionIndex, boolean hasParentConsumer, - CompletableFuture> subscribeFuture, MessageId startMessageId, + boolean parentConsumerHasListener, CompletableFuture> subscribeFuture, MessageId startMessageId, long startMessageRollbackDurationInSec, Schema schema, ConsumerInterceptors interceptors, boolean createTopicIfDoesNotExist) { super(client, topic, conf, conf.getReceiverQueueSize(), executorProvider, subscribeFuture, schema, interceptors); @@ -254,6 +260,7 @@ protected ConsumerImpl(PulsarClientImpl client, String topic, ConsumerConfigurat this.partitionIndex = partitionIndex; this.hasParentConsumer = hasParentConsumer; this.receiverQueueRefillThreshold = conf.getReceiverQueueSize() / 2; + this.parentConsumerHasListener = parentConsumerHasListener; this.priorityLevel = conf.getPriorityLevel(); this.readCompacted = conf.isReadCompacted(); this.subscriptionInitialPosition = conf.getSubscriptionInitialPosition(); @@ -427,8 +434,7 @@ protected CompletableFuture> internalReceiveAsync() { if (message == null) { pendingReceives.add(result); cancellationHandler.setCancelAction(() -> pendingReceives.remove(result)); - } - if (message != null) { + } else { messageProcessed(message); 
result.complete(beforeConsume(message)); } @@ -494,6 +500,7 @@ protected CompletableFuture> internalBatchReceiveAsync() { } else { OpBatchReceive opBatchReceive = OpBatchReceive.of(result); pendingBatchReceives.add(opBatchReceive); + triggerBatchReceiveTimeoutTask(); cancellationHandler.setCancelAction(() -> pendingBatchReceives.remove(opBatchReceive)); } }); @@ -569,7 +576,9 @@ protected CompletableFuture doReconsumeLater(Message message, AckType a .create(); } } catch (Exception e) { - log.error("Create retry letter producer exception with topic: {}", deadLetterPolicy.getRetryLetterTopic(), e); + log.error("Create retry letter producer exception with topic: {}", + deadLetterPolicy.getRetryLetterTopic(), e); + return FutureUtil.failedFuture(e); } finally { createProducerLock.writeLock().unlock(); } @@ -590,9 +599,9 @@ protected CompletableFuture doReconsumeLater(Message message, AckType a propertiesMap.put(RetryMessageUtil.SYSTEM_PROPERTY_RECONSUMETIMES, String.valueOf(reconsumetimes)); propertiesMap.put(RetryMessageUtil.SYSTEM_PROPERTY_DELAY_TIME, String.valueOf(unit.toMillis(delayTime))); + MessageId finalMessageId = messageId; if (reconsumetimes > this.deadLetterPolicy.getMaxRedeliverCount() && StringUtils.isNotBlank(deadLetterPolicy.getDeadLetterTopic())) { initDeadLetterProducerIfNeeded(); - MessageId finalMessageId = messageId; deadLetterProducer.thenAccept(dlqProducer -> { TypedMessageBuilder typedMessageBuilderNew = dlqProducer.newMessage(Schema.AUTO_PRODUCE_BYTES(retryMessage.getReaderSchema().get())) @@ -624,18 +633,22 @@ protected CompletableFuture doReconsumeLater(Message message, AckType a if (message.hasKey()) { typedMessageBuilderNew.key(message.getKey()); } - typedMessageBuilderNew.send(); - return doAcknowledge(messageId, ackType, properties, null); + typedMessageBuilderNew.sendAsync() + .thenCompose(__ -> doAcknowledge(finalMessageId, ackType, properties, null)) + .thenAccept(v -> result.complete(null)) + .exceptionally(ex -> { + 
result.completeExceptionally(ex); + return null; + }); } } catch (Exception e) { - log.error("Send to retry letter topic exception with topic: {}, messageId: {}", retryLetterProducer.getTopic(), messageId, e); - Set messageIds = Collections.singleton(messageId); - unAckedMessageTracker.remove(messageId); - redeliverUnacknowledgedMessages(messageIds); + result.completeExceptionally(e); } } MessageId finalMessageId = messageId; result.exceptionally(ex -> { + log.error("Send to retry letter topic exception with topic: {}, messageId: {}", + retryLetterProducer.getTopic(), finalMessageId, ex); Set messageIds = Collections.singleton(finalMessageId); unAckedMessageTracker.remove(finalMessageId); redeliverUnacknowledgedMessages(messageIds); @@ -649,8 +662,8 @@ private SortedMap getPropertiesMap(Message message, String or if (message.getProperties() != null) { propertiesMap.putAll(message.getProperties()); } - propertiesMap.put(RetryMessageUtil.SYSTEM_PROPERTY_REAL_TOPIC, originTopicNameStr); - propertiesMap.put(RetryMessageUtil.SYSTEM_PROPERTY_ORIGIN_MESSAGE_ID, originMessageIdStr); + propertiesMap.putIfAbsent(RetryMessageUtil.SYSTEM_PROPERTY_REAL_TOPIC, originTopicNameStr); + propertiesMap.putIfAbsent(RetryMessageUtil.SYSTEM_PROPERTY_ORIGIN_MESSAGE_ID, originMessageIdStr); return propertiesMap; } @@ -781,35 +794,46 @@ public void connectionOpened(final ClientCnx cnx) { } log.warn("[{}][{}] Failed to subscribe to topic on {}", topic, subscription, cnx.channel().remoteAddress()); - if (e.getCause() instanceof PulsarClientException - && PulsarClientException.isRetriableError(e.getCause()) - && System.currentTimeMillis() < SUBSCRIBE_DEADLINE_UPDATER.get(ConsumerImpl.this)) { - reconnectLater(e.getCause()); - } else if (!subscribeFuture.isDone()) { - // unable to create new consumer, fail operation - setState(State.Failed); - closeConsumerTasks(); - subscribeFuture.completeExceptionally( - PulsarClientException.wrap(e, String.format("Failed to subscribe the topic %s with 
subscription " + - "name %s when connecting to the broker", topicName.toString(), subscription))); - client.cleanupConsumer(this); - } else if (e.getCause() instanceof TopicDoesNotExistException) { - // The topic was deleted after the consumer was created, and we're - // not allowed to recreate the topic. This can happen in few cases: - // * Regex consumer getting error after topic gets deleted - // * Regular consumer after topic is manually delete and with - // auto-topic-creation set to false - // No more retries are needed in this case. - setState(State.Failed); - closeConsumerTasks(); - client.cleanupConsumer(this); - log.warn("[{}][{}] Closed consumer because topic does not exist anymore {}", topic, subscription, cnx.channel().remoteAddress()); - } else { - // consumer was subscribed and connected but we got some error, keep trying - reconnectLater(e.getCause()); - } - return null; - }); + if (e.getCause() instanceof PulsarClientException.TimeoutException) { + // Creating the consumer has timed out. We need to ensure the broker closes the consumer + // in case it was indeed created, otherwise it might prevent new create consumer operation, + // since we are not necessarily closing the connection. 
+ long closeRequestId = client.newRequestId(); + ByteBuf cmd = Commands.newCloseConsumer(consumerId, closeRequestId); + cnx.sendRequestWithId(cmd, closeRequestId); + } + + if (e.getCause() instanceof PulsarClientException + && PulsarClientException.isRetriableError(e.getCause()) + && System.currentTimeMillis() < SUBSCRIBE_DEADLINE_UPDATER.get(ConsumerImpl.this)) { + reconnectLater(e.getCause()); + } else if (!subscribeFuture.isDone()) { + // unable to create new consumer, fail operation + setState(State.Failed); + closeConsumerTasks(); + subscribeFuture.completeExceptionally( + PulsarClientException.wrap(e, String.format("Failed to subscribe the topic %s " + + "with subscription name %s when connecting to the broker", + topicName.toString(), subscription))); + client.cleanupConsumer(this); + } else if (e.getCause() instanceof TopicDoesNotExistException) { + // The topic was deleted after the consumer was created, and we're + // not allowed to recreate the topic. This can happen in few cases: + // * Regex consumer getting error after topic gets deleted + // * Regular consumer after topic is manually delete and with + // auto-topic-creation set to false + // No more retries are needed in this case. 
+ setState(State.Failed); + closeConsumerTasks(); + client.cleanupConsumer(this); + log.warn("[{}][{}] Closed consumer because topic does not exist anymore {}", + topic, subscription, cnx.channel().remoteAddress()); + } else { + // consumer was subscribed and connected but we got some error, keep trying + reconnectLater(e.getCause()); + } + return null; + }); } protected void consumerIsReconnectedToBroker(ClientCnx cnx, int currentQueueSize) { @@ -979,6 +1003,7 @@ private void closeConsumerTasks() { if (batchReceiveTimeout != null) { batchReceiveTimeout.cancel(); } + negativeAcksTracker.close(); stats.getStatTimeout().ifPresent(Timeout::cancel); } @@ -1122,8 +1147,7 @@ private void processPayloadByProcessor(final BrokerEntryMetadata brokerEntryMeta increaseAvailablePermits(cnx(), skippedMessages.get()); } - internalPinnedExecutor.execute(() - -> tryTriggerListener()); + tryTriggerListener(); } void messageReceived(MessageIdData messageId, int redeliveryCount, List ackSet, ByteBuf headersAndPayload, ClientCnx cnx) { @@ -1151,9 +1175,9 @@ void messageReceived(MessageIdData messageId, int redeliveryCount, List ac final int numMessages = msgMetadata.getNumMessagesInBatch(); final int numChunks = msgMetadata.hasNumChunksFromMsg() ? 
msgMetadata.getNumChunksFromMsg() : 0; final boolean isChunkedMessage = numChunks > 1 && conf.getSubscriptionType() != SubscriptionType.Shared; - MessageIdImpl msgId = new MessageIdImpl(messageId.getLedgerId(), messageId.getEntryId(), getPartitionIndex()); - if (acknowledgmentsGroupingTracker.isDuplicate(msgId)) { + if (numMessages == 1 && !msgMetadata.hasNumMessagesInBatch() + && acknowledgmentsGroupingTracker.isDuplicate(msgId)) { if (log.isDebugEnabled()) { log.debug("[{}] [{}] Ignoring message as it was already being acked earlier by same consumer {}/{}", topic, subscription, consumerName, msgId); @@ -1216,10 +1240,18 @@ void messageReceived(MessageIdData messageId, int redeliveryCount, List ac newMessage(msgId, brokerEntryMetadata, msgMetadata, uncompressedPayload, schema, redeliveryCount); uncompressedPayload.release(); - if (deadLetterPolicy != null && possibleSendToDeadLetterTopicMessages != null && - redeliveryCount >= deadLetterPolicy.getMaxRedeliverCount()) { - possibleSendToDeadLetterTopicMessages.put((MessageIdImpl) message.getMessageId(), - Collections.singletonList(message)); + if (deadLetterPolicy != null && possibleSendToDeadLetterTopicMessages != null) { + if (redeliveryCount >= deadLetterPolicy.getMaxRedeliverCount()) { + possibleSendToDeadLetterTopicMessages.put((MessageIdImpl) message.getMessageId(), + Collections.singletonList(message)); + if (redeliveryCount > deadLetterPolicy.getMaxRedeliverCount()) { + redeliverUnacknowledgedMessages(Collections.singleton(message.getMessageId())); + // The message is skipped due to reaching the max redelivery count, + // so we need to increase the available permits + increaseAvailablePermits(cnx); + return; + } + } } executeNotifyCallback(message); } else { @@ -1228,19 +1260,8 @@ void messageReceived(MessageIdData messageId, int redeliveryCount, List ac uncompressedPayload.release(); } - internalPinnedExecutor.execute(() - -> tryTriggerListener()); - - } - - private void tryTriggerListener() { - if 
(listener != null) { - triggerListener(); - } - } + tryTriggerListener(); - private boolean isTxnMessage(MessageMetadata messageMetadata) { - return messageMetadata.hasTxnidMostBits() && messageMetadata.hasTxnidLeastBits(); } private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata msgMetadata, MessageIdImpl msgId, @@ -1248,15 +1269,17 @@ private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata m // Lazy task scheduling to expire incomplete chunk message if (!expireChunkMessageTaskScheduled && expireTimeOfIncompleteChunkedMessageMillis > 0) { - internalPinnedExecutor.scheduleAtFixedRate(() -> { - removeExpireIncompleteChunkedMessages(); - }, expireTimeOfIncompleteChunkedMessageMillis, expireTimeOfIncompleteChunkedMessageMillis, - TimeUnit.MILLISECONDS); + ((ScheduledExecutorService) client.getScheduledExecutorProvider().getExecutor()).scheduleAtFixedRate( + () -> internalPinnedExecutor + .execute(catchingAndLoggingThrowables(this::removeExpireIncompleteChunkedMessages)), + expireTimeOfIncompleteChunkedMessageMillis, expireTimeOfIncompleteChunkedMessageMillis, + TimeUnit.MILLISECONDS + ); expireChunkMessageTaskScheduled = true; } if (msgMetadata.getChunkId() == 0) { - ByteBuf chunkedMsgBuffer = Unpooled.directBuffer(msgMetadata.getTotalChunkMsgSize(), + ByteBuf chunkedMsgBuffer = PulsarByteBufAllocator.DEFAULT.buffer(msgMetadata.getTotalChunkMsgSize(), msgMetadata.getTotalChunkMsgSize()); int totalChunks = msgMetadata.getNumChunksFromMsg(); chunkedMessagesMap.computeIfAbsent(msgMetadata.getUuid(), @@ -1271,12 +1294,10 @@ private ByteBuf processMessageChunk(ByteBuf compressedPayload, MessageMetadata m ChunkedMessageCtx chunkedMsgCtx = chunkedMessagesMap.get(msgMetadata.getUuid()); // discard message if chunk is out-of-order if (chunkedMsgCtx == null || chunkedMsgCtx.chunkedMsgBuffer == null - || msgMetadata.getChunkId() != (chunkedMsgCtx.lastChunkedMessageId + 1) - || msgMetadata.getChunkId() >= 
msgMetadata.getTotalChunkMsgSize()) { + || msgMetadata.getChunkId() != (chunkedMsgCtx.lastChunkedMessageId + 1)) { // means we lost the first chunk: should never happen - log.info("Received unexpected chunk messageId {}, last-chunk-id{}, chunkId = {}, total-chunks {}", msgId, - (chunkedMsgCtx != null ? chunkedMsgCtx.lastChunkedMessageId : null), msgMetadata.getChunkId(), - msgMetadata.getTotalChunkMsgSize()); + log.info("Received unexpected chunk messageId {}, last-chunk-id{}, chunkId = {}", msgId, + (chunkedMsgCtx != null ? chunkedMsgCtx.lastChunkedMessageId : null), msgMetadata.getChunkId()); if (chunkedMsgCtx != null) { if (chunkedMsgCtx.chunkedMsgBuffer != null) { ReferenceCountUtil.safeRelease(chunkedMsgCtx.chunkedMsgBuffer); @@ -1404,6 +1425,15 @@ void receiveIndividualMessagesFromBatch(BrokerEntryMetadata brokerEntryMetadata, } if (possibleToDeadLetter != null) { possibleToDeadLetter.add(message); + // Skip the message which reaches the max redelivery count. + if (redeliveryCount > deadLetterPolicy.getMaxRedeliverCount()) { + skippedMessages++; + continue; + } + } + if (acknowledgmentsGroupingTracker.isDuplicate(message.getMessageId())) { + skippedMessages++; + continue; } executeNotifyCallback(message); } @@ -1415,8 +1445,14 @@ void receiveIndividualMessagesFromBatch(BrokerEntryMetadata brokerEntryMetadata, discardCorruptedMessage(messageId, cnx, ValidationError.BatchDeSerializeError); } - if (possibleToDeadLetter != null && possibleSendToDeadLetterTopicMessages != null) { - possibleSendToDeadLetterTopicMessages.put(batchMessage, possibleToDeadLetter); + if (deadLetterPolicy != null && possibleSendToDeadLetterTopicMessages != null) { + if (redeliveryCount >= deadLetterPolicy.getMaxRedeliverCount()) { + possibleSendToDeadLetterTopicMessages.put(batchMessage, + possibleToDeadLetter); + if (redeliveryCount > deadLetterPolicy.getMaxRedeliverCount()) { + redeliverUnacknowledgedMessages(Collections.singleton(batchMessage)); + } + } } if (log.isDebugEnabled()) { 
@@ -1457,7 +1493,9 @@ protected synchronized void messageProcessed(Message msg) { if (msgCnx != currentCnx) { // The processed message did belong to the old queue that was cleared after reconnection. } else { - increaseAvailablePermits(currentCnx); + if (listener == null && !parentConsumerHasListener) { + increaseAvailablePermits(currentCnx); + } stats.updateNumMsgsReceived(msg); trackMessage(msg); @@ -1488,6 +1526,14 @@ protected void trackMessage(MessageId messageId) { } } + void increaseAvailablePermits(MessageImpl msg) { + ClientCnx currentCnx = cnx(); + ClientCnx msgCnx = msg.getCnx(); + if (msgCnx == currentCnx) { + increaseAvailablePermits(currentCnx); + } + } + void increaseAvailablePermits(ClientCnx currentCnx) { increaseAvailablePermits(currentCnx, 1); } @@ -1772,21 +1818,20 @@ private CompletableFuture> getRedeliveryMessageIdData(List data = new ArrayList<>(messageIds.size()); - List> futures = new ArrayList<>(messageIds.size()); - messageIds.forEach(messageId -> { + List> futures = messageIds.stream().map(messageId -> { CompletableFuture future = processPossibleToDLQ(messageId); - futures.add(future); - future.thenAccept(sendToDLQ -> { + return future.thenApply(sendToDLQ -> { if (!sendToDLQ) { - data.add(new MessageIdData() + return new MessageIdData() .setPartition(messageId.getPartitionIndex()) .setLedgerId(messageId.getLedgerId()) - .setEntryId(messageId.getEntryId())); + .setEntryId(messageId.getEntryId()); } + return null; }); - }); - return FutureUtil.waitForAll(futures).thenCompose(v -> CompletableFuture.completedFuture(data)); + }).collect(Collectors.toList()); + return FutureUtil.waitForAll(futures).thenApply(v -> + futures.stream().map(CompletableFuture::join).filter(Objects::nonNull).collect(Collectors.toList())); } private CompletableFuture processPossibleToDLQ(MessageIdImpl messageId) { @@ -1807,10 +1852,14 @@ private CompletableFuture processPossibleToDLQ(MessageIdImpl messageId) for (MessageImpl message : finalDeadLetterMessages) { String 
originMessageIdStr = getOriginMessageIdStr(message); String originTopicNameStr = getOriginTopicNameStr(message); - producerDLQ.newMessage(Schema.AUTO_PRODUCE_BYTES(message.getReaderSchema().get())) + TypedMessageBuilder typedMessageBuilderNew = + producerDLQ.newMessage(Schema.AUTO_PRODUCE_BYTES(message.getReaderSchema().get())) .value(message.getData()) - .properties(getPropertiesMap(message, originMessageIdStr, originTopicNameStr)) - .sendAsync() + .properties(getPropertiesMap(message, originMessageIdStr, originTopicNameStr)); + if (message.hasKey()) { + typedMessageBuilderNew.key(message.getKey()); + } + typedMessageBuilderNew.sendAsync() .thenAccept(messageIdInDLQ -> { possibleSendToDeadLetterTopicMessages.remove(finalMessageId); acknowledgeAsync(finalMessageId).whenComplete((v, ex) -> { @@ -2000,7 +2049,7 @@ public CompletableFuture hasMessageAvailableAsync() { if (lastDequeuedMessageId == MessageId.earliest) { // if we are starting from latest, we should seek to the actual last message first. // allow the last one to be read when read head inclusively. 
- if (startMessageId.equals(MessageId.latest)) { + if (MessageId.latest.equals(startMessageId)) { CompletableFuture future = internalGetLastMessageIdAsync(); // if the consumer is configured to read inclusive then we need to seek to the last message @@ -2015,8 +2064,10 @@ public CompletableFuture hasMessageAvailableAsync() { MessageIdImpl markDeletePosition = MessageIdImpl .convertToMessageIdImpl(response.markDeletePosition); - if (markDeletePosition != null) { - // we only care about comparing ledger ids and entry ids as mark delete position doesn't have other ids such as batch index + if (markDeletePosition != null && !(markDeletePosition.getEntryId() < 0 + && markDeletePosition.getLedgerId() > lastMessageId.getLedgerId())) { + // we only care about comparing ledger ids and entry ids as mark delete position doesn't have + // other ids such as batch index int result = ComparisonChain.start() .compare(markDeletePosition.getLedgerId(), lastMessageId.getLedgerId()) .compare(markDeletePosition.getEntryId(), lastMessageId.getEntryId()) @@ -2192,7 +2243,7 @@ private void internalGetLastMessageIdAsync(final Backoff backoff, return; } - internalPinnedExecutor.schedule(() -> { + ((ScheduledExecutorService) client.getScheduledExecutorProvider().getExecutor()).schedule(() -> { log.warn("[{}] [{}] Could not get connection while getLastMessageId -- Will try again in {} ms", topic, getHandlerName(), nextDelay); remainingTime.addAndGet(-nextDelay); @@ -2487,7 +2538,14 @@ private CompletableFuture doTransactionAcknowledgeForResponse(MessageId me } else { unAckedMessageTracker.remove(messageId); } - return cnx().newAckForReceipt(cmd, requestId); + ClientCnx cnx = cnx(); + if (cnx == null) { + return FutureUtil.failedFuture(new PulsarClientException + .ConnectException("Failed to ack message [" + messageId + "] " + + "for transaction [" + txnID + "] due to consumer connect fail, consumer state: " + getState())); + } else { + return cnx.newAckForReceipt(cmd, requestId); + } } public 
Map>> getPossibleSendToDeadLetterTopicMessages() { diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerStatsRecorderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerStatsRecorderImpl.java index fb61a9a8fa371..4fde45bd3b454 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerStatsRecorderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerStatsRecorderImpl.java @@ -115,7 +115,7 @@ private void init(ConsumerConfigurationData conf) { try { log.info("Starting Pulsar consumer status recorder with config: {}", w.writeValueAsString(conf)); - log.info("Pulsar client config: {}", w.withoutAttribute("authentication").writeValueAsString(pulsarClient.getConfiguration())); + log.info("Pulsar client config: {}", w.writeValueAsString(pulsarClient.getConfiguration())); } catch (IOException e) { log.error("Failed to dump config info", e); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HandlerState.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HandlerState.java index e72c97fadadcc..582df8c112bc3 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HandlerState.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HandlerState.java @@ -64,6 +64,13 @@ protected State getState() { return STATE_UPDATER.get(this); } + protected boolean changeToConnecting() { + return (STATE_UPDATER.compareAndSet(this, State.Uninitialized, State.Connecting) + || STATE_UPDATER.compareAndSet(this, State.Ready, State.Connecting) + || STATE_UPDATER.compareAndSet(this, State.RegisteringSchema, State.Connecting) + || STATE_UPDATER.compareAndSet(this, State.Connecting, State.Connecting)); + } + protected void setState(State s) { STATE_UPDATER.set(this, s); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpClient.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpClient.java index 
c295975ecd36a..12830ed5a6b78 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpClient.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpClient.java @@ -18,24 +18,24 @@ */ package org.apache.pulsar.client.impl; +import io.netty.channel.EventLoopGroup; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslProvider; import java.io.Closeable; import java.io.IOException; import java.net.HttpURLConnection; import java.net.InetSocketAddress; import java.net.URI; import java.net.URL; +import java.security.GeneralSecurityException; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.CompletableFuture; - -import io.netty.channel.EventLoopGroup; -import io.netty.handler.codec.http.HttpRequest; -import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.ssl.SslContext; import javax.net.ssl.SSLContext; import lombok.extern.slf4j.Slf4j; - import org.apache.pulsar.PulsarVersion; import org.apache.pulsar.client.api.Authentication; import org.apache.pulsar.client.api.AuthenticationDataProvider; @@ -43,6 +43,7 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.PulsarClientException.NotFoundException; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import org.apache.pulsar.client.util.WithSNISslEngineFactory; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.common.util.SecurityUtility; import org.apache.pulsar.common.util.keystoretls.KeyStoreSSLContext; @@ -72,6 +73,7 @@ protected HttpClient(ClientConfigurationData conf, EventLoopGroup eventLoopGroup this.serviceNameResolver.updateServiceUrl(conf.getServiceUrl()); DefaultAsyncHttpClientConfig.Builder confBuilder = new DefaultAsyncHttpClientConfig.Builder(); + confBuilder.setUseProxyProperties(true); 
confBuilder.setFollowRedirect(true); confBuilder.setMaxRedirects(conf.getMaxLookupRedirects()); confBuilder.setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_IN_SECONDS * 1000); @@ -111,25 +113,38 @@ public boolean keepAlive(InetSocketAddress remoteAddress, Request ahcRequest, JsseSslEngineFactory sslEngineFactory = new JsseSslEngineFactory(sslCtx); confBuilder.setSslEngineFactory(sslEngineFactory); } else { + SslProvider sslProvider = null; + if (conf.getSslProvider() != null) { + sslProvider = SslProvider.valueOf(conf.getSslProvider()); + } SslContext sslCtx = null; if (authData.hasDataForTls()) { sslCtx = authData.getTlsTrustStoreStream() == null - ? SecurityUtility.createNettySslContextForClient(conf.isTlsAllowInsecureConnection(), - conf.getTlsTrustCertsFilePath(), authData.getTlsCertificates(), - authData.getTlsPrivateKey()) - : SecurityUtility.createNettySslContextForClient(conf.isTlsAllowInsecureConnection(), - authData.getTlsTrustStoreStream(), authData.getTlsCertificates(), - authData.getTlsPrivateKey()); - } - else { + ? 
SecurityUtility.createNettySslContextForClient(sslProvider, + conf.isTlsAllowInsecureConnection(), + conf.getTlsTrustCertsFilePath(), authData.getTlsCertificates(), + authData.getTlsPrivateKey(), conf.getTlsCiphers(), conf.getTlsProtocols()) + : SecurityUtility.createNettySslContextForClient(sslProvider, + conf.isTlsAllowInsecureConnection(), + authData.getTlsTrustStoreStream(), authData.getTlsCertificates(), + authData.getTlsPrivateKey(), conf.getTlsCiphers(), conf.getTlsProtocols()); + } else { sslCtx = SecurityUtility.createNettySslContextForClient( + sslProvider, conf.isTlsAllowInsecureConnection(), - conf.getTlsTrustCertsFilePath()); + conf.getTlsTrustCertsFilePath(), conf.getTlsCiphers(), conf.getTlsProtocols()); } confBuilder.setSslContext(sslCtx); + if (!conf.isTlsHostnameVerificationEnable()) { + confBuilder.setSslEngineFactory(new WithSNISslEngineFactory(serviceNameResolver + .resolveHostUri().getHost())); + } } confBuilder.setUseInsecureTrustManager(conf.isTlsAllowInsecureConnection()); + confBuilder.setDisableHttpsEndpointIdentificationAlgorithm(!conf.isTlsHostnameVerificationEnable()); + } catch (GeneralSecurityException e) { + throw new PulsarClientException.InvalidConfigurationException(e); } catch (Exception e) { throw new PulsarClientException.InvalidConfigurationException(e); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpLookupService.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpLookupService.java index f2cc1692eebe6..72326c3db1f20 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpLookupService.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/HttpLookupService.java @@ -39,6 +39,7 @@ import org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespace.Mode; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.PulsarClientException.NotFoundException; +import 
org.apache.pulsar.client.api.SchemaSerializationException; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; import org.apache.pulsar.common.lookup.data.LookupData; import org.apache.pulsar.common.naming.NamespaceName; @@ -162,6 +163,10 @@ public CompletableFuture> getSchema(TopicName topicName, by String schemaName = topicName.getSchemaName(); String path = String.format("admin/v2/schemas/%s/schema", schemaName); if (version != null) { + if (version.length == 0) { + future.completeExceptionally(new SchemaSerializationException("Empty schema version")); + return future; + } path = String.format("admin/v2/schemas/%s/schema/%s", schemaName, ByteBuffer.wrap(version).getLong()); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageImpl.java index 67c176cfe63bf..acdf73bed3f82 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessageImpl.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.Collections; import java.util.List; @@ -42,6 +43,7 @@ import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SchemaSerializationException; import org.apache.pulsar.client.impl.schema.AbstractSchema; import org.apache.pulsar.client.impl.schema.AutoConsumeSchema; import org.apache.pulsar.client.impl.schema.KeyValueSchemaImpl; @@ -52,6 +54,7 @@ import org.apache.pulsar.common.api.proto.SingleMessageMetadata; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.schema.BytesSchemaVersion; +import org.apache.pulsar.common.protocol.schema.SchemaHash; import org.apache.pulsar.common.schema.KeyValueEncodingType; 
import org.apache.pulsar.common.schema.SchemaInfo; import org.apache.pulsar.common.schema.SchemaType; @@ -64,6 +67,8 @@ public class MessageImpl implements Message { private ByteBuf payload; private Schema schema; + + private SchemaHash schemaHash; private SchemaInfo schemaInfoForReplicator; private SchemaState schemaState = SchemaState.None; private Optional encryptionCtx = Optional.empty(); @@ -90,6 +95,7 @@ public static MessageImpl create(MessageMetadata msgMetadata, ByteBuffer msg.payload = Unpooled.wrappedBuffer(payload); msg.properties = null; msg.schema = schema; + msg.schemaHash = SchemaHash.of(schema); msg.uncompressedSize = payload.remaining(); return msg; } @@ -391,12 +397,14 @@ public Optional> getReaderSchema() { if (schema == null) { return Optional.empty(); } + byte[] schemaVersion = getSchemaVersion(); + if (schemaVersion == null) { + return Optional.of(schema); + } if (schema instanceof AutoConsumeSchema) { - byte[] schemaVersion = getSchemaVersion(); return Optional.of(((AutoConsumeSchema) schema) .atSchemaVersion(schemaVersion)); } else if (schema instanceof AbstractSchema) { - byte[] schemaVersion = getSchemaVersion(); return Optional.of(((AbstractSchema) schema) .atSchemaVersion(schemaVersion)); } else { @@ -404,10 +412,13 @@ public Optional> getReaderSchema() { } } + // For messages produced by older version producers without schema, the schema version is an empty byte array + // rather than null. @Override public byte[] getSchemaVersion() { if (msgMetadata.hasSchemaVersion()) { - return msgMetadata.getSchemaVersion(); + byte[] schemaVersion = msgMetadata.getSchemaVersion(); + return (schemaVersion.length == 0) ? null : schemaVersion; } else { return null; } @@ -432,9 +443,14 @@ public SchemaInfo getSchemaInfo() { return schema.getSchemaInfo(); } + public SchemaHash getSchemaHash() { + return schemaHash == null ? 
SchemaHash.of(new byte[0], null) : schemaHash; + } + public void setSchemaInfoForReplicator(SchemaInfo schemaInfo) { if (msgMetadata.hasReplicatedFrom()) { this.schemaInfoForReplicator = schemaInfo; + this.schemaHash = SchemaHash.of(schemaInfo); } else { throw new IllegalArgumentException("Only allowed to set schemaInfoForReplicator for a replicated message."); } @@ -472,8 +488,19 @@ private KeyValueSchemaImpl getKeyValueSchema() { } } - private T decode(byte[] schemaVersion) { + try { + return decodeBySchema(schemaVersion); + } catch (ArrayIndexOutOfBoundsException e) { + // It usually means the message was produced without schema check while the message is not compatible with + // the current schema. Therefore, convert it to SchemaSerializationException with a better description. + final int payloadSize = payload.readableBytes(); + throw new SchemaSerializationException("payload (" + payloadSize + " bytes) cannot be decoded with schema " + + new String(schema.getSchemaInfo().getSchema(), StandardCharsets.UTF_8)); + } + } + + private T decodeBySchema(byte[] schemaVersion) { T value = poolMessage ? 
schema.decode(payload.nioBuffer(), schemaVersion) : null; if (value != null) { return value; @@ -743,9 +770,6 @@ int getUncompressedSize() { } SchemaState getSchemaState() { - if (getSchemaInfo() == null) { - return SchemaState.Ready; - } return schemaState; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessagePayloadContextImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessagePayloadContextImpl.java index aa6cab80db2bc..f21900387b623 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessagePayloadContextImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessagePayloadContextImpl.java @@ -88,6 +88,7 @@ public void recycle() { ackBitSet.recycle(); ackBitSet = null; } + recyclerHandle.recycle(this); } @Override diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessagesImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessagesImpl.java index 4ff23eb46f5b6..532d152a8318c 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessagesImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MessagesImpl.java @@ -30,7 +30,7 @@ @NotThreadSafe public class MessagesImpl implements Messages { - private List> messageList; + private final List> messageList; private final int maxNumberOfMessages; private final long maxSizeOfMessages; @@ -45,6 +45,10 @@ protected MessagesImpl(int maxNumberOfMessages, long maxSizeOfMessages) { } protected boolean canAdd(Message message) { + if (currentNumberOfMessages == 0) { + // It's ok to add at least one message into a batch. 
+ return true; + } if (maxNumberOfMessages > 0 && currentNumberOfMessages + 1 > maxNumberOfMessages) { return false; } @@ -77,6 +81,10 @@ public void clear() { this.messageList.clear(); } + List> getMessageList() { + return messageList; + } + @Override public Iterator> iterator() { return messageList.iterator(); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImpl.java index 520e7f3ab099c..2dd6bb9e304df 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImpl.java @@ -24,28 +24,6 @@ import com.google.common.collect.Lists; import io.netty.util.Timeout; import io.netty.util.TimerTask; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.pulsar.client.api.Consumer; -import org.apache.pulsar.client.api.ConsumerStats; -import org.apache.pulsar.client.api.Message; -import org.apache.pulsar.client.api.MessageId; -import org.apache.pulsar.client.api.Messages; -import org.apache.pulsar.client.api.PulsarClientException; -import org.apache.pulsar.client.api.PulsarClientException.NotSupportedException; -import org.apache.pulsar.client.api.Schema; -import org.apache.pulsar.client.api.SubscriptionType; -import org.apache.pulsar.client.impl.conf.ConsumerConfigurationData; -import org.apache.pulsar.client.impl.transaction.TransactionImpl; -import org.apache.pulsar.client.util.ConsumerName; -import org.apache.pulsar.client.util.ExecutorProvider; -import org.apache.pulsar.common.api.proto.CommandAck.AckType; -import org.apache.pulsar.common.naming.TopicName; -import org.apache.pulsar.common.partition.PartitionedTopicMetadata; -import org.apache.pulsar.common.util.CompletableFutureCancellationHandler; -import org.apache.pulsar.common.util.FutureUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - 
import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -61,14 +39,35 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.pulsar.client.api.BatchReceivePolicy; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.ConsumerStats; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.Messages; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.PulsarClientException.NotSupportedException; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.impl.conf.ConsumerConfigurationData; +import org.apache.pulsar.client.impl.transaction.TransactionImpl; +import org.apache.pulsar.client.util.ConsumerName; +import org.apache.pulsar.client.util.ExecutorProvider; +import org.apache.pulsar.common.api.proto.CommandAck.AckType; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.partition.PartitionedTopicMetadata; +import org.apache.pulsar.common.util.CompletableFutureCancellationHandler; +import org.apache.pulsar.common.util.FutureUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; @@ -100,8 
+99,6 @@ public class MultiTopicsConsumerImpl extends ConsumerBase { private volatile Timeout partitionsAutoUpdateTimeout = null; TopicsPartitionChangedListener topicsPartitionChangedListener; CompletableFuture partitionsAutoUpdateFuture = null; - private final ReadWriteLock lock = new ReentrantReadWriteLock(); - private final ConsumerStatsRecorder stats; private UnAckedMessageTracker unAckedMessageTracker; private final ConsumerConfigurationData internalConfig; @@ -239,19 +236,29 @@ private void startReceivingMessages(List> newConsumers) { if (getState() == State.Ready) { newConsumers.forEach(consumer -> { consumer.increaseAvailablePermits(consumer.getConnectionHandler().cnx(), conf.getReceiverQueueSize()); - internalPinnedExecutor.execute(() -> receiveMessageFromConsumer(consumer)); + internalPinnedExecutor.execute(() -> receiveMessageFromConsumer(consumer, true)); }); } } - private void receiveMessageFromConsumer(ConsumerImpl consumer) { - consumer.receiveAsync().thenAccept(message -> { + private void receiveMessageFromConsumer(ConsumerImpl consumer, boolean batchReceive) { + CompletableFuture>> messagesFuture; + if (batchReceive) { + messagesFuture = consumer.batchReceiveAsync().thenApply(msgs -> ((MessagesImpl) msgs).getMessageList()); + } else { + messagesFuture = consumer.receiveAsync().thenApply(Collections::singletonList); + } + messagesFuture.thenAcceptAsync(messages -> { if (log.isDebugEnabled()) { log.debug("[{}] [{}] Receive message from sub consumer:{}", topic, subscription, consumer.getTopic()); } + // Stop to process the remaining message after the consumer is closed. 
+ if (getState() == State.Closed) { + return; + } // Process the message, add to the queue and trigger listener or async callback - messageReceived(consumer, message); + messages.forEach(msg -> messageReceived(consumer, msg)); int size = incomingMessages.size(); if (size >= maxReceiverQueueSize @@ -260,27 +267,29 @@ private void receiveMessageFromConsumer(ConsumerImpl consumer) { // or if any consumer is already paused (to create fair chance for already paused consumers) pausedConsumers.add(consumer); - // Since we din't get a mutex, the condition on the incoming queue might have changed after + // Since we didn't get a mutex, the condition on the incoming queue might have changed after // we have paused the current consumer. We need to re-check in order to avoid this consumer // from getting stalled. resumeReceivingFromPausedConsumersIfNeeded(); } else { - // Schedule next receiveAsync() if the incoming queue is not full. Use a different thread to avoid - // recursion and stack overflow - internalPinnedExecutor.execute(() -> receiveMessageFromConsumer(consumer)); + // Call receiveAsync() if the incoming queue is not full. Because this block is run with + // thenAcceptAsync, there is no chance for recursion that would lead to stack overflow. 
+ receiveMessageFromConsumer(consumer, messages.size() > 0); } - }).exceptionally(ex -> { + }, internalPinnedExecutor).exceptionally(ex -> { if (ex instanceof PulsarClientException.AlreadyClosedException || ex.getCause() instanceof PulsarClientException.AlreadyClosedException) { // ignore the exception that happens when the consumer is closed return null; } log.error("Receive operation failed on consumer {} - Retrying later", consumer, ex); - internalPinnedExecutor.schedule(() -> receiveMessageFromConsumer(consumer), 10, TimeUnit.SECONDS); + ((ScheduledExecutorService) client.getScheduledExecutorProvider().getExecutor()) + .schedule(() -> receiveMessageFromConsumer(consumer, true), 10, TimeUnit.SECONDS); return null; }); } + // Must be called from the internalPinnedExecutor thread private void messageReceived(ConsumerImpl consumer, Message message) { checkArgument(message instanceof MessageImpl); TopicMessageImpl topicMessage = new TopicMessageImpl<>(consumer.getTopic(), @@ -300,9 +309,7 @@ private void messageReceived(ConsumerImpl consumer, Message message) { notifyPendingBatchReceivedCallBack(); } - if (listener != null) { - triggerListener(); - } + tryTriggerListener(); } @Override @@ -320,7 +327,7 @@ private void resumeReceivingFromPausedConsumersIfNeeded() { } internalPinnedExecutor.execute(() -> { - receiveMessageFromConsumer(consumer); + receiveMessageFromConsumer(consumer, true); }); } } @@ -377,8 +384,7 @@ protected Messages internalBatchReceive() throws PulsarClientException { protected CompletableFuture> internalBatchReceiveAsync() { CompletableFutureCancellationHandler cancellationHandler = new CompletableFutureCancellationHandler(); CompletableFuture> result = cancellationHandler.createFuture(); - try { - lock.writeLock().lock(); + internalPinnedExecutor.execute(() -> { if (hasEnoughMessagesForBatchReceive()) { MessagesImpl messages = getNewMessagesImpl(); Message msgPeeked = incomingMessages.peek(); @@ -395,13 +401,11 @@ protected CompletableFuture> 
internalBatchReceiveAsync() { } else { OpBatchReceive opBatchReceive = OpBatchReceive.of(result); pendingBatchReceives.add(opBatchReceive); + triggerBatchReceiveTimeoutTask(); cancellationHandler.setCancelAction(() -> pendingBatchReceives.remove(opBatchReceive)); } resumeReceivingFromPausedConsumersIfNeeded(); - } finally { - lock.writeLock().unlock(); - } - + }); return result; } @@ -409,17 +413,19 @@ protected CompletableFuture> internalBatchReceiveAsync() { protected CompletableFuture> internalReceiveAsync() { CompletableFutureCancellationHandler cancellationHandler = new CompletableFutureCancellationHandler(); CompletableFuture> result = cancellationHandler.createFuture(); - Message message = incomingMessages.poll(); - if (message == null) { - pendingReceives.add(result); - cancellationHandler.setCancelAction(() -> pendingReceives.remove(result)); - } else { - decreaseIncomingMessageSize(message); - checkState(message instanceof TopicMessageImpl); - unAckedMessageTracker.add(message.getMessageId()); - resumeReceivingFromPausedConsumersIfNeeded(); - result.complete(message); - } + internalPinnedExecutor.execute(() -> { + Message message = incomingMessages.poll(); + if (message == null) { + pendingReceives.add(result); + cancellationHandler.setCancelAction(() -> pendingReceives.remove(result)); + } else { + decreaseIncomingMessageSize(message); + checkState(message instanceof TopicMessageImpl); + unAckedMessageTracker.add(message.getMessageId()); + resumeReceivingFromPausedConsumersIfNeeded(); + result.complete(message); + } + }); return result; } @@ -532,14 +538,14 @@ public CompletableFuture unsubscribeAsync() { .map(ConsumerImpl::unsubscribeAsync).collect(Collectors.toList()); FutureUtil.waitForAll(futureList) - .thenCompose((r) -> { + .thenComposeAsync((r) -> { setState(State.Closed); cleanupMultiConsumer(); log.info("[{}] [{}] [{}] Unsubscribed Topics Consumer", topic, subscription, consumerName); // fail all pending-receive futures to notify application 
return failPendingReceive(); - }) + }, internalPinnedExecutor) .whenComplete((r, ex) -> { if (ex == null) { unsubscribeFuture.complete(null); @@ -574,13 +580,13 @@ public CompletableFuture closeAsync() { .map(ConsumerImpl::closeAsync).collect(Collectors.toList()); FutureUtil.waitForAll(futureList) - .thenCompose((r) -> { + .thenComposeAsync((r) -> { setState(State.Closed); cleanupMultiConsumer(); log.info("[{}] [{}] Closed Topics Consumer", topic, subscription); // fail all pending-receive futures to notify application return failPendingReceive(); - }) + }, internalPinnedExecutor) .whenComplete((r, ex) -> { if (ex == null) { closeFuture.complete(null); @@ -627,17 +633,14 @@ private ConsumerConfigurationData getInternalConsumerConfig() { @Override public void redeliverUnacknowledgedMessages() { - lock.writeLock().lock(); - try { + internalPinnedExecutor.execute(() -> { consumers.values().stream().forEach(consumer -> { consumer.redeliverUnacknowledgedMessages(); consumer.unAckedChunkedMessageIdSequenceMap.clear(); }); clearIncomingMessages(); unAckedMessageTracker.clear(); - } finally { - lock.writeLock().unlock(); - } + }); resumeReceivingFromPausedConsumersIfNeeded(); } @@ -751,6 +754,9 @@ public boolean hasMessageAvailable() throws PulsarClientException { } public CompletableFuture hasMessageAvailableAsync() { + if (numMessagesInQueue() > 0) { + return CompletableFuture.completedFuture(true); + } List> futureList = new ArrayList<>(); final AtomicBoolean hasMessageAvailable = new AtomicBoolean(false); for (ConsumerImpl consumer : consumers.values()) { @@ -765,7 +771,7 @@ public CompletableFuture hasMessageAvailableAsync() { if (exception != null) { completableFuture.completeExceptionally(exception); } else { - completableFuture.complete(hasMessageAvailable.get()); + completableFuture.complete(hasMessageAvailable.get() || numMessagesInQueue() > 0); } }); return completableFuture; @@ -969,14 +975,13 @@ private void doSubscribeTopicPartitions(Schema schema, 
partitionIndex -> { String partitionName = TopicName.get(topicName).getPartition(partitionIndex).toString(); CompletableFuture> subFuture = new CompletableFuture<>(); - ConsumerImpl newConsumer = ConsumerImpl.newConsumerImpl(client, partitionName, - configurationData, client.externalExecutorProvider(), - partitionIndex, true, subFuture, - startMessageId, schema, interceptors, - createIfDoesNotExist, startMessageRollbackDurationInSec); + ConsumerImpl newConsumer = createInternalConsumer(configurationData, partitionName, + partitionIndex, subFuture, createIfDoesNotExist, schema); synchronized (pauseMutex) { if (paused) { newConsumer.pause(); + } else { + newConsumer.resume(); } consumers.putIfAbsent(newConsumer.getTopic(), newConsumer); } @@ -996,14 +1001,14 @@ private void doSubscribeTopicPartitions(Schema schema, subscribeResult.completeExceptionally(new PulsarClientException(errorMessage)); return existingValue; } else { - ConsumerImpl newConsumer = ConsumerImpl.newConsumerImpl(client, topicName, internalConfig, - client.externalExecutorProvider(), -1, - true, subFuture, null, schema, interceptors, - createIfDoesNotExist); + ConsumerImpl newConsumer = createInternalConsumer(internalConfig, topicName, + -1, subFuture, createIfDoesNotExist, schema); synchronized (pauseMutex) { if (paused) { newConsumer.pause(); + } else { + newConsumer.resume(); } } return newConsumer; @@ -1039,6 +1044,22 @@ private void doSubscribeTopicPartitions(Schema schema, }); } + private ConsumerImpl createInternalConsumer(ConsumerConfigurationData configurationData, String partitionName, + int partitionIndex, CompletableFuture> subFuture, + boolean createIfDoesNotExist, Schema schema) { + BatchReceivePolicy internalBatchReceivePolicy = BatchReceivePolicy.builder() + .maxNumMessages(Math.max(configurationData.getReceiverQueueSize() / 2, 1)) + .maxNumBytes(-1) + .timeout(1, TimeUnit.MILLISECONDS) + .build(); + configurationData.setBatchReceivePolicy(internalBatchReceivePolicy); + return 
ConsumerImpl.newConsumerImpl(client, partitionName, + configurationData, client.externalExecutorProvider(), + partitionIndex, true, listener != null, subFuture, + startMessageId, schema, interceptors, + createIfDoesNotExist, startMessageRollbackDurationInSec); + } + // handling failure during subscribe new topic, unsubscribe success created partitions private void handleSubscribeOneTopicError(String topicName, Throwable error, CompletableFuture subscribeFuture) { log.warn("[{}] Failed to subscribe for topic [{}] in topics consumer {}", topic, topicName, error.getMessage()); @@ -1105,7 +1126,9 @@ public CompletableFuture unsubscribeAsync(String topicName) { }); removeTopic(topicName); - ((UnAckedTopicMessageTracker) unAckedMessageTracker).removeTopicMessages(topicName); + if (unAckedMessageTracker instanceof UnAckedTopicMessageTracker) { + ((UnAckedTopicMessageTracker) unAckedMessageTracker).removeTopicMessages(topicName); + } unsubscribeFuture.complete(null); log.info("[{}] [{}] [{}] Unsubscribed Topics Consumer, allTopicPartitionsNumber: {}", @@ -1153,7 +1176,9 @@ public CompletableFuture removeConsumerAsync(String topicName) { }); removeTopic(topicName); - ((UnAckedTopicMessageTracker) unAckedMessageTracker).removeTopicMessages(topicName); + if (unAckedMessageTracker instanceof UnAckedTopicMessageTracker) { + ((UnAckedTopicMessageTracker) unAckedMessageTracker).removeTopicMessages(topicName); + } unsubscribeFuture.complete(null); log.info("[{}] [{}] [{}] Removed Topics Consumer, allTopicPartitionsNumber: {}", @@ -1291,14 +1316,13 @@ private CompletableFuture subscribeIncreasedTopicPartitions(String topicNa int partitionIndex = TopicName.getPartitionIndex(partitionName); CompletableFuture> subFuture = new CompletableFuture<>(); ConsumerConfigurationData configurationData = getInternalConsumerConfig(); - ConsumerImpl newConsumer = ConsumerImpl.newConsumerImpl( - client, partitionName, configurationData, - client.externalExecutorProvider(), - partitionIndex, true, 
subFuture, null, schema, interceptors, - true /* createTopicIfDoesNotExist */); + ConsumerImpl newConsumer = createInternalConsumer(configurationData, partitionName, + partitionIndex, subFuture, true, schema); synchronized (pauseMutex) { if (paused) { newConsumer.pause(); + } else { + newConsumer.resume(); } consumers.putIfAbsent(newConsumer.getTopic(), newConsumer); } @@ -1324,28 +1348,35 @@ private CompletableFuture subscribeIncreasedTopicPartitions(String topicNa topicName, oldPartitionNumber, currentPartitionNumber); return FutureUtil.failedFuture(new NotSupportedException("not support shrink topic partitions")); } + }).exceptionally(throwable -> { + log.warn("Failed to get partitions for topic to determine if new partitions are added", throwable); + return null; }); } private TimerTask partitionsAutoUpdateTimerTask = new TimerTask() { @Override public void run(Timeout timeout) throws Exception { - if (timeout.isCancelled() || getState() != State.Ready) { - return; - } + try { + if (timeout.isCancelled() || getState() != State.Ready) { + return; + } - if (log.isDebugEnabled()) { - log.debug("[{}] run partitionsAutoUpdateTimerTask", topic); - } + if (log.isDebugEnabled()) { + log.debug("[{}] run partitionsAutoUpdateTimerTask", topic); + } - // if last auto update not completed yet, do nothing. - if (partitionsAutoUpdateFuture == null || partitionsAutoUpdateFuture.isDone()) { - partitionsAutoUpdateFuture = topicsPartitionChangedListener.onTopicsExtended(partitionedTopics.keySet()); + // if last auto update not completed yet, do nothing. + if (partitionsAutoUpdateFuture == null || partitionsAutoUpdateFuture.isDone()) { + partitionsAutoUpdateFuture = topicsPartitionChangedListener.onTopicsExtended(partitionedTopics.keySet()); + } + } catch (Throwable th) { + log.warn("Encountered error in partition auto update timer task for multi-topic consumer. 
Another task will be scheduled.", th); + } finally { + // schedule the next re-check task + partitionsAutoUpdateTimeout = client.timer() + .newTimeout(partitionsAutoUpdateTimerTask, conf.getAutoUpdatePartitionsIntervalSeconds(), TimeUnit.SECONDS); } - - // schedule the next re-check task - partitionsAutoUpdateTimeout = client.timer() - .newTimeout(partitionsAutoUpdateTimerTask, conf.getAutoUpdatePartitionsIntervalSeconds(), TimeUnit.SECONDS); } }; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsReaderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsReaderImpl.java index fab61b2b51130..b656c005db9a5 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsReaderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/MultiTopicsReaderImpl.java @@ -144,7 +144,7 @@ public boolean hasReachedEndOfTopic() { @Override public boolean hasMessageAvailable() throws PulsarClientException { - return multiTopicsConsumer.hasMessageAvailable() || multiTopicsConsumer.numMessagesInQueue() > 0; + return multiTopicsConsumer.hasMessageAvailable(); } @Override diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/NegativeAcksTracker.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/NegativeAcksTracker.java index bbdd7864987bf..a0620093ffb96 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/NegativeAcksTracker.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/NegativeAcksTracker.java @@ -21,6 +21,7 @@ import io.netty.util.Timeout; import io.netty.util.Timer; +import java.io.Closeable; import java.util.HashMap; import java.util.HashSet; import java.util.Set; @@ -28,9 +29,9 @@ import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.impl.conf.ConsumerConfigurationData; -import static org.apache.pulsar.client.impl.UnAckedMessageTracker.addChunkedMessageIdsAndRemoveFromSequnceMap; +import 
static org.apache.pulsar.client.impl.UnAckedMessageTracker.addChunkedMessageIdsAndRemoveFromSequenceMap; -class NegativeAcksTracker { +class NegativeAcksTracker implements Closeable { private HashMap nackedMessages = null; @@ -63,7 +64,7 @@ private synchronized void triggerRedelivery(Timeout t) { long now = System.nanoTime(); nackedMessages.forEach((msgId, timestamp) -> { if (timestamp < now) { - addChunkedMessageIdsAndRemoveFromSequnceMap(msgId, messagesToRedeliver, this.consumer); + addChunkedMessageIdsAndRemoveFromSequenceMap(msgId, messagesToRedeliver, this.consumer); messagesToRedeliver.add(msgId); } }); @@ -93,4 +94,17 @@ public synchronized void add(MessageId messageId) { this.timeout = timer.newTimeout(this::triggerRedelivery, timerIntervalNanos, TimeUnit.NANOSECONDS); } } + + @Override + public synchronized void close() { + if (timeout != null && !timeout.isCancelled()) { + timeout.cancel(); + timeout = null; + } + + if (nackedMessages != null) { + nackedMessages.clear(); + nackedMessages = null; + } + } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java index 452553181ef2d..f8d3e8855047d 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java @@ -48,6 +48,7 @@ import org.apache.pulsar.client.api.TopicMetadata; import org.apache.pulsar.client.api.transaction.Transaction; import org.apache.pulsar.client.impl.conf.ProducerConfigurationData; +import org.apache.pulsar.client.impl.transaction.TransactionImpl; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; @@ -72,7 +73,8 @@ public class PartitionedProducerImpl extends ProducerBase { public 
PartitionedProducerImpl(PulsarClientImpl client, String topic, ProducerConfigurationData conf, int numPartitions, CompletableFuture> producerCreatedFuture, Schema schema, ProducerInterceptors interceptors) { super(client, topic, conf, producerCreatedFuture, schema, interceptors); - this.producers = new ConcurrentOpenHashMap<>(); + this.producers = + ConcurrentOpenHashMap.>newBuilder().build(); this.topicMetadata = new TopicMetadataImpl(numPartitions); this.routerPolicy = getMessageRouter(); stats = client.getConfiguration().getStatsIntervalSeconds() > 0 ? new ProducerStatsRecorderImpl() : null; @@ -191,6 +193,10 @@ CompletableFuture internalSendAsync(Message message) { @Override CompletableFuture internalSendWithTxnAsync(Message message, Transaction txn) { + CompletableFuture completableFuture = new CompletableFuture<>(); + if (txn != null && !((TransactionImpl)txn).checkIfOpen(completableFuture)) { + return completableFuture; + } int partition = routerPolicy.choosePartition(message, topicMetadata); checkArgument(partition >= 0 && partition < topicMetadata.numPartitions(), "Illegal partition index chosen by the message routing policy: " + partition); @@ -346,7 +352,7 @@ public CompletableFuture onTopicsExtended(Collection topicsExtende if (log.isDebugEnabled()) { log.debug("[{}] partitions number. old: {}, new: {}", - topic, oldPartitionNumber, currentPartitionNumber); + topic, oldPartitionNumber, currentPartitionNumber); } if (oldPartitionNumber == currentPartitionNumber) { @@ -397,10 +403,14 @@ public CompletableFuture onTopicsExtended(Collection topicsExtende } } else { log.error("[{}] not support shrink topic partitions. 
old: {}, new: {}", - topic, oldPartitionNumber, currentPartitionNumber); + topic, oldPartitionNumber, currentPartitionNumber); future.completeExceptionally(new NotSupportedException("not support shrink topic partitions")); } return future; + }).exceptionally(throwable -> { + log.error("[{}] Auto getting partitions failed", topic, throwable); + future.completeExceptionally(throwable); + return null; }); return future; @@ -410,28 +420,43 @@ public CompletableFuture onTopicsExtended(Collection topicsExtende private TimerTask partitionsAutoUpdateTimerTask = new TimerTask() { @Override public void run(Timeout timeout) throws Exception { - if (timeout.isCancelled() || getState() != State.Ready) { - return; - } + try { + if (timeout.isCancelled() || getState() != State.Ready) { + return; + } - if (log.isDebugEnabled()) { - log.debug("[{}] run partitionsAutoUpdateTimerTask for partitioned producer", topic); - } + if (log.isDebugEnabled()) { + log.debug("[{}] run partitionsAutoUpdateTimerTask for partitioned producer", topic); + } - // if last auto update not completed yet, do nothing. - if (partitionsAutoUpdateFuture == null || partitionsAutoUpdateFuture.isDone()) { - partitionsAutoUpdateFuture = topicsPartitionChangedListener.onTopicsExtended(ImmutableList.of(topic)); + // if last auto update not completed yet, do nothing. + if (partitionsAutoUpdateFuture == null || partitionsAutoUpdateFuture.isDone()) { + partitionsAutoUpdateFuture = topicsPartitionChangedListener.onTopicsExtended(ImmutableList.of(topic)); + } + } catch (Throwable th) { + log.warn("Encountered error in partition auto update timer task for partition producer. 
Another task will be scheduled.", th); + } finally { + // schedule the next re-check task + partitionsAutoUpdateTimeout = client.timer() + .newTimeout(partitionsAutoUpdateTimerTask, conf.getAutoUpdatePartitionsIntervalSeconds(), TimeUnit.SECONDS); } - - // schedule the next re-check task - partitionsAutoUpdateTimeout = client.timer() - .newTimeout(partitionsAutoUpdateTimerTask, conf.getAutoUpdatePartitionsIntervalSeconds(), TimeUnit.SECONDS); } }; + @VisibleForTesting + public CompletableFuture getPartitionsAutoUpdateFuture() { + return partitionsAutoUpdateFuture; + } + @VisibleForTesting public Timeout getPartitionsAutoUpdateTimeout() { return partitionsAutoUpdateTimeout; } + @VisibleForTesting + public CompletableFuture getOriginalLastSendFuture() { + return CompletableFuture.allOf( + producers.values().stream().map(ProducerImpl::getOriginalLastSendFuture) + .toArray(CompletableFuture[]::new)); + } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PatternMultiTopicsConsumerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PatternMultiTopicsConsumerImpl.java index 2f946af712bfb..114cb274bc3f8 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PatternMultiTopicsConsumerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PatternMultiTopicsConsumerImpl.java @@ -24,14 +24,15 @@ import com.google.common.collect.Lists; import io.netty.util.Timeout; import io.netty.util.TimerTask; +import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; import java.util.stream.Collectors; - import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.impl.conf.ConsumerConfigurationData; @@ -81,40 +82,36 @@ public void run(Timeout timeout) throws 
Exception { return; } - CompletableFuture recheckFuture = new CompletableFuture<>(); - List> futures = Lists.newArrayListWithExpectedSize(2); - - client.getLookup().getTopicsUnderNamespace(namespaceName, subscriptionMode).thenAccept(topics -> { - if (log.isDebugEnabled()) { - log.debug("Get topics under namespace {}, topics.size: {}", namespaceName.toString(), topics.size()); - topics.forEach(topicName -> - log.debug("Get topics under namespace {}, topic: {}", namespaceName.toString(), topicName)); - } - - List newTopics = PulsarClientImpl.topicsPatternFilter(topics, topicsPattern); - List oldTopics = Lists.newArrayList(); - oldTopics.addAll(getPartitionedTopics()); - getPartitions().forEach(p -> { - TopicName t = TopicName.get(p); - if (!t.isPartitioned() || !oldTopics.contains(t.getPartitionedTopicName())) { - oldTopics.add(p); - } - }); - - futures.add(topicsChangeListener.onTopicsAdded(topicsListsMinus(newTopics, oldTopics))); - futures.add(topicsChangeListener.onTopicsRemoved(topicsListsMinus(oldTopics, newTopics))); - FutureUtil.waitForAll(futures) - .thenAccept(finalFuture -> recheckFuture.complete(null)) - .exceptionally(ex -> { + client.getLookup().getTopicsUnderNamespace(namespaceName, subscriptionMode) + .thenCompose(topics -> { + if (log.isDebugEnabled()) { + log.debug("Get topics under namespace {}, topics.size: {}", + namespaceName.toString(), topics.size()); + topics.forEach(topicName -> + log.debug("Get topics under namespace {}, topic: {}", + namespaceName.toString(), topicName)); + } + final List newTopics = PulsarClientImpl.topicsPatternFilter(topics, topicsPattern); + final List oldTopics = new ArrayList<>(getPartitionedTopics()); + for (String partition : getPartitions()) { + TopicName topicName = TopicName.get(partition); + if (!topicName.isPartitioned() || !oldTopics.contains(topicName.getPartitionedTopicName())) { + oldTopics.add(partition); + } + } + final List> listenersCallback = new ArrayList<>(2); + 
listenersCallback.add(topicsChangeListener.onTopicsAdded(topicsListsMinus(newTopics, oldTopics))); + listenersCallback.add(topicsChangeListener.onTopicsRemoved(topicsListsMinus(oldTopics, newTopics))); + return FutureUtil.waitForAll(Collections.unmodifiableList(listenersCallback)); + }).exceptionally(ex -> { log.warn("[{}] Failed to recheck topics change: {}", topic, ex.getMessage()); - recheckFuture.completeExceptionally(ex); return null; + }).thenAccept(__ -> { + // schedule the next re-check task + this.recheckPatternTimeout = client.timer() + .newTimeout(PatternMultiTopicsConsumerImpl.this, + Math.max(1, conf.getPatternAutoDiscoveryPeriod()), TimeUnit.SECONDS); }); - }); - - // schedule the next re-check task - this.recheckPatternTimeout = client.timer().newTimeout(PatternMultiTopicsConsumerImpl.this, - Math.max(1, conf.getPatternAutoDiscoveryPeriod()), TimeUnit.SECONDS); } public Pattern getPattern() { diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PersistentAcknowledgmentsGroupingTracker.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PersistentAcknowledgmentsGroupingTracker.java index 21ef9b38dbed9..c2206bc7468ab 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PersistentAcknowledgmentsGroupingTracker.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PersistentAcknowledgmentsGroupingTracker.java @@ -18,8 +18,10 @@ */ package org.apache.pulsar.client.impl; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import io.netty.buffer.ByteBuf; import io.netty.channel.EventLoopGroup; +import io.netty.util.concurrent.FastThreadLocal; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -32,11 +34,8 @@ import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import 
java.util.concurrent.locks.ReentrantReadWriteLock; - -import io.netty.util.Recycler; -import lombok.NonNull; +import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.tuple.Triple; import org.apache.pulsar.client.api.MessageId; @@ -68,17 +67,11 @@ public class PersistentAcknowledgmentsGroupingTracker implements Acknowledgments private volatile TimedCompletableFuture currentIndividualAckFuture; private volatile TimedCompletableFuture currentCumulativeAckFuture; - private volatile LastCumulativeAck lastCumulativeAck = - LastCumulativeAck.create((MessageIdImpl) MessageIdImpl.earliest, null); - - private volatile boolean cumulativeAckFlushRequired = false; + private final LastCumulativeAck lastCumulativeAck = new LastCumulativeAck(); // When we flush the command, we should ensure current ack request will send correct private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - private static final AtomicReferenceFieldUpdater LAST_CUMULATIVE_ACK_UPDATER = AtomicReferenceFieldUpdater - .newUpdater(PersistentAcknowledgmentsGroupingTracker.class, LastCumulativeAck.class, "lastCumulativeAck"); - /** * This is a set of all the individual acks that the application has issued and that were not already sent to * broker. 
@@ -102,7 +95,7 @@ public PersistentAcknowledgmentsGroupingTracker(ConsumerImpl consumer, Consum this.currentCumulativeAckFuture = new TimedCompletableFuture<>(); if (acknowledgementGroupTimeMicros > 0) { - scheduledTask = eventLoopGroup.next().scheduleWithFixedDelay(this::flush, acknowledgementGroupTimeMicros, + scheduledTask = eventLoopGroup.next().scheduleWithFixedDelay(catchingAndLoggingThrowables(this::flush), acknowledgementGroupTimeMicros, acknowledgementGroupTimeMicros, TimeUnit.MICROSECONDS); } else { scheduledTask = null; @@ -114,16 +107,13 @@ public PersistentAcknowledgmentsGroupingTracker(ConsumerImpl consumer, Consum * resent after a disconnection and for which the user has already sent an acknowledgement. */ @Override - public boolean isDuplicate(@NonNull MessageId messageId) { - final MessageId messageIdOfLastAck = lastCumulativeAck.messageId; - if (messageIdOfLastAck == null) { - return false; - } - if (messageId.compareTo(messageIdOfLastAck) <= 0) { + public boolean isDuplicate(MessageId messageId) { + final MessageIdImpl messageIdOfLastAck = lastCumulativeAck.getMessageId(); + if (messageIdOfLastAck != null && messageId.compareTo(messageIdOfLastAck) <= 0) { // Already included in a cumulative ack return true; } else { - return pendingIndividualAcks.contains(messageId); + return pendingIndividualAcks.contains((MessageIdImpl) messageId); } } @@ -362,7 +352,7 @@ private void doIndividualBatchAckAsync(BatchMessageIdImpl batchMessageId) { value = ConcurrentBitSetRecyclable.create(batchMessageId.getAcker().getBitSet()); } else { value = ConcurrentBitSetRecyclable.create(); - value.set(0, batchMessageId.getBatchIndex()); + value.set(0, batchMessageId.getOriginalBatchSize()); } return value; }); @@ -371,30 +361,7 @@ private void doIndividualBatchAckAsync(BatchMessageIdImpl batchMessageId) { private void doCumulativeAckAsync(MessageIdImpl msgId, BitSetRecyclable bitSet) { // Handle concurrent updates from different threads - LastCumulativeAck 
currentCumulativeAck = LastCumulativeAck.create(msgId, bitSet); - while (true) { - LastCumulativeAck lastCumulativeAck = this.lastCumulativeAck; - if (msgId.compareTo(lastCumulativeAck.messageId) > 0) { - if (LAST_CUMULATIVE_ACK_UPDATER.compareAndSet(this, this.lastCumulativeAck, currentCumulativeAck)) { - if (lastCumulativeAck.bitSetRecyclable != null) { - try { - lastCumulativeAck.bitSetRecyclable.recycle(); - } catch (Exception ignore) { - // no-op - } - lastCumulativeAck.bitSetRecyclable = null; - } - lastCumulativeAck.recycle(); - // Successfully updated the last cumulative ack. Next flush iteration will send this to broker. - cumulativeAckFlushRequired = true; - return; - } - } else { - currentCumulativeAck.recycle(); - // message id acknowledging an before the current last cumulative ack - return; - } - } + lastCumulativeAck.update(msgId, bitSet); } private CompletableFuture doCumulativeBatchIndexAck(BatchMessageIdImpl batchMessageId, @@ -475,15 +442,15 @@ public void flush() { } private void flushAsync(ClientCnx cnx) { + final LastCumulativeAck lastCumulativeAckToFlush = lastCumulativeAck.flush(); boolean shouldFlush = false; - if (cumulativeAckFlushRequired) { - newMessageAckCommandAndWrite(cnx, consumer.consumerId, lastCumulativeAck.messageId.ledgerId, - lastCumulativeAck.messageId.getEntryId(), lastCumulativeAck.bitSetRecyclable, - AckType.Cumulative, null, Collections.emptyMap(), false, - this.currentCumulativeAckFuture, null); - this.consumer.unAckedChunkedMessageIdSequenceMap.remove(lastCumulativeAck.messageId); + if (lastCumulativeAckToFlush != null) { shouldFlush = true; - cumulativeAckFlushRequired = false; + final MessageIdImpl messageId = lastCumulativeAckToFlush.getMessageId(); + newMessageAckCommandAndWrite(cnx, consumer.consumerId, messageId.getLedgerId(), messageId.getEntryId(), + lastCumulativeAckToFlush.getBitSetRecyclable(), AckType.Cumulative, null, + Collections.emptyMap(), false, this.currentCumulativeAckFuture, null); + 
this.consumer.unAckedChunkedMessageIdSequenceMap.remove(messageId); } // Flush all individual acks @@ -556,7 +523,7 @@ private void flushAsync(ClientCnx cnx) { @Override public void flushAndClean() { flush(); - lastCumulativeAck = LastCumulativeAck.create((MessageIdImpl) MessageIdImpl.earliest, null); + lastCumulativeAck.reset(); pendingIndividualAcks.clear(); } @@ -659,36 +626,72 @@ private boolean isAckReceiptEnabled(ClientCnx cnx) { return ackReceiptEnabled && cnx != null && Commands.peerSupportsAckReceipt(cnx.getRemoteEndpointProtocolVersion()); } +} - private static class LastCumulativeAck { - private MessageIdImpl messageId; - private BitSetRecyclable bitSetRecyclable; +@Getter +class LastCumulativeAck { - static LastCumulativeAck create(MessageIdImpl messageId, BitSetRecyclable bitSetRecyclable) { - LastCumulativeAck op = RECYCLER.get(); - op.messageId = messageId; - op.bitSetRecyclable = bitSetRecyclable; - return op; - } + // It's used as a returned value by `flush()` to avoid creating a new instance each time `flush()` is called + public static final FastThreadLocal LOCAL_LAST_CUMULATIVE_ACK = + new FastThreadLocal() { - private LastCumulativeAck(Recycler.Handle recyclerHandle) { - this.recyclerHandle = recyclerHandle; - } + @Override + protected LastCumulativeAck initialValue() { + return new LastCumulativeAck(); + } + }; + public static final MessageIdImpl DEFAULT_MESSAGE_ID = (MessageIdImpl) MessageIdImpl.earliest; - void recycle() { - if (bitSetRecyclable != null) { + private volatile MessageIdImpl messageId = DEFAULT_MESSAGE_ID; + private BitSetRecyclable bitSetRecyclable = null; + private boolean flushRequired = false; + + public synchronized void update(final MessageIdImpl messageId, final BitSetRecyclable bitSetRecyclable) { + if (messageId.compareTo(this.messageId) > 0) { + if (this.bitSetRecyclable != null && this.bitSetRecyclable != bitSetRecyclable) { this.bitSetRecyclable.recycle(); } - this.messageId = null; - recyclerHandle.recycle(this); + 
set(messageId, bitSetRecyclable); + flushRequired = true; } + } - private final Recycler.Handle recyclerHandle; - private static final Recycler RECYCLER = new Recycler() { - @Override - protected LastCumulativeAck newObject(Handle handle) { - return new LastCumulativeAck(handle); + public synchronized LastCumulativeAck flush() { + if (flushRequired) { + final LastCumulativeAck localLastCumulativeAck = LOCAL_LAST_CUMULATIVE_ACK.get(); + if (bitSetRecyclable != null) { + localLastCumulativeAck.set(messageId, BitSetRecyclable.valueOf(bitSetRecyclable.toLongArray())); + } else { + localLastCumulativeAck.set(this.messageId, null); } - }; + flushRequired = false; + return localLastCumulativeAck; + } else { + // Return null to indicate nothing to be flushed + return null; + } + } + + public synchronized void reset() { + if (bitSetRecyclable != null) { + bitSetRecyclable.recycle(); + } + messageId = DEFAULT_MESSAGE_ID; + bitSetRecyclable = null; + flushRequired = false; + } + + private synchronized void set(final MessageIdImpl messageId, final BitSetRecyclable bitSetRecyclable) { + this.messageId = messageId; + this.bitSetRecyclable = bitSetRecyclable; + } + + @Override + public String toString() { + String s = messageId.toString(); + if (bitSetRecyclable != null) { + s += " (bit set: " + bitSetRecyclable + ")"; + } + return s; } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerBase.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerBase.java index d164d09c4c4b4..053fb529596e7 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerBase.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerBase.java @@ -53,7 +53,8 @@ protected ProducerBase(PulsarClientImpl client, String topic, ProducerConfigurat this.conf = conf; this.schema = schema; this.interceptors = interceptors; - this.schemaCache = new ConcurrentOpenHashMap<>(); + this.schemaCache = + 
ConcurrentOpenHashMap.newBuilder().build(); if (!conf.isMultiSchema()) { multiSchemaMode = MultiSchemaMode.Disabled; } @@ -91,11 +92,6 @@ public TypedMessageBuilder newMessage(Schema schema) { public TypedMessageBuilder newMessage(Transaction txn) { checkArgument(txn instanceof TransactionImpl); - // check the producer has proper settings to send transactional messages - if (conf.getSendTimeoutMs() > 0) { - throw new IllegalArgumentException("Only producers disabled sendTimeout are allowed to" - + " produce transactional messages"); - } return new TypedMessageBuilderImpl<>(this, schema, (TransactionImpl) txn); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerImpl.java index 0f27a63ee8e5b..f015c23ce23b5 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerImpl.java @@ -29,6 +29,7 @@ import static org.apache.pulsar.client.impl.ProducerBase.MultiSchemaMode.Enabled; import static org.apache.pulsar.common.protocol.Commands.hasChecksum; import static org.apache.pulsar.common.protocol.Commands.readChecksum; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.annotations.VisibleForTesting; import io.netty.buffer.ByteBuf; import io.netty.util.Recycler; @@ -53,7 +54,9 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.concurrent.atomic.AtomicLongFieldUpdater; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.client.api.BatcherBuilder; @@ -87,6 +90,7 @@ import org.apache.pulsar.common.schema.SchemaType; import 
org.apache.pulsar.common.util.DateFormatter; import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.RelativeTimeUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -111,6 +115,7 @@ public class ProducerImpl extends ProducerBase implements TimerTask, Conne private final BatchMessageContainerBase batchMessageContainer; private CompletableFuture lastSendFuture = CompletableFuture.completedFuture(null); + private LastSendFutureWrapper lastSendFutureWrapper = LastSendFutureWrapper.create(lastSendFuture); // Globally unique producer name private String producerName; @@ -143,11 +148,13 @@ public class ProducerImpl extends ProducerBase implements TimerTask, Conne private final ConnectionHandler connectionHandler; - private ScheduledFuture batchTimerTask; + private final AtomicReference batchTimerTask; private Optional topicEpoch = Optional.empty(); private final List previousExceptions = new CopyOnWriteArrayList(); + private boolean errorState; + @SuppressWarnings("rawtypes") private static final AtomicLongFieldUpdater msgIdGeneratorUpdater = AtomicLongFieldUpdater .newUpdater(ProducerImpl.class, "msgIdGenerator"); @@ -204,7 +211,7 @@ public ProducerImpl(PulsarClientImpl client, String topic, ProducerConfiguration if (this.msgCrypto != null) { // Regenerate data key cipher at fixed interval - keyGeneratorTask = client.eventLoopGroup().scheduleWithFixedDelay(() -> { + keyGeneratorTask = client.eventLoopGroup().scheduleWithFixedDelay(catchingAndLoggingThrowables(() -> { try { msgCrypto.addPublicKeyCipher(conf.getEncryptionKeys(), conf.getCryptoKeyReader()); } catch (CryptoException e) { @@ -217,7 +224,7 @@ public ProducerImpl(PulsarClientImpl client, String topic, ProducerConfiguration producerName, topic))); } } - }, 0L, 4L, TimeUnit.HOURS); + }), 0L, 4L, TimeUnit.HOURS); } if (conf.getSendTimeoutMs() > 0) { @@ -232,8 +239,10 @@ public ProducerImpl(PulsarClientImpl client, String topic, ProducerConfiguration } 
this.batchMessageContainer = (BatchMessageContainerBase)containerBuilder.build(); this.batchMessageContainer.setProducer(this); + this.batchTimerTask = new AtomicReference<>(); } else { this.batchMessageContainer = null; + this.batchTimerTask = null; } if (client.getConfiguration().getStatsIntervalSeconds() > 0) { stats = new ProducerStatsRecorderImpl(client, conf, this); @@ -258,6 +267,22 @@ public ProducerImpl(PulsarClientImpl client, String topic, ProducerConfiguration grabCnx(); } + protected void semaphoreRelease(final int releaseCountRequest) { + if (semaphore.isPresent()) { + if (!errorState) { + final int availableReleasePermits = + conf.getMaxPendingMessages() - this.semaphore.get().availablePermits(); + if (availableReleasePermits - releaseCountRequest < 0) { + log.error("Semaphore permit release count request greater then availableReleasePermits" + + " : availableReleasePermits={}, releaseCountRequest={}", + availableReleasePermits, releaseCountRequest); + errorState = true; + } + } + semaphore.get().release(releaseCountRequest); + } + } + protected OpSendMsgQueue createPendingMessagesQueue() { return new OpSendMsgQueue(); } @@ -369,6 +394,10 @@ CompletableFuture internalSendWithTxnAsync(Message message, Transa if (txn == null) { return internalSendAsync(message); } else { + CompletableFuture completableFuture = new CompletableFuture<>(); + if (!((TransactionImpl)txn).checkIfOpen(completableFuture)) { + return completableFuture; + } return ((TransactionImpl) txn).registerProducedTopic(topic) .thenCompose(ignored -> internalSendAsync(message)); } @@ -446,6 +475,8 @@ public void sendAsync(Message message, SendCallback callback) { // chunked message also sent individually so, try to acquire send-permits for (int i = 0; i < (totalChunks - 1); i++) { if (!canEnqueueRequest(callback, message.getSequenceId(), 0 /* The memory was already reserved */)) { + client.getMemoryLimitController().releaseMemory(uncompressedSize); + semaphoreRelease(i + 1); return; } } @@ 
-461,7 +492,23 @@ public void sendAsync(Message message, SendCallback callback) { sequenceId = msgMetadata.getSequenceId(); } String uuid = totalChunks > 1 ? String.format("%s-%d", producerName, sequenceId) : null; + byte[] schemaVersion = totalChunks > 1 && msg.getMessageBuilder().hasSchemaVersion() ? + msg.getMessageBuilder().getSchemaVersion() : null; + byte[] orderingKey = totalChunks > 1 && msg.getMessageBuilder().hasOrderingKey() ? + msg.getMessageBuilder().getOrderingKey() : null; for (int chunkId = 0; chunkId < totalChunks; chunkId++) { + // Need to reset the schemaVersion, because the schemaVersion is based on a ByteBuf object in + // `MessageMetadata`, if we want to re-serialize the `SEND` command using a same `MessageMetadata`, + // we need to reset the ByteBuf of the schemaVersion in `MessageMetadata`, I think we need to + // reset `ByteBuf` objects in `MessageMetadata` after call the method `MessageMetadata#writeTo()`. + if (chunkId > 0) { + if (schemaVersion != null) { + msg.getMessageBuilder().setSchemaVersion(schemaVersion); + } + if (orderingKey != null) { + msg.getMessageBuilder().setOrderingKey(orderingKey); + } + } serializeAndSendMessage(msg, payload, sequenceId, uuid, chunkId, totalChunks, readStartIndex, ClientCnx.getMaxMessageSize(), compressedPayload, compressed, compressedPayload.readableBytes(), uncompressedSize, callback); @@ -579,13 +626,22 @@ private void serializeAndSendMessage(MessageImpl msg, ByteBuf payload, } } - private boolean populateMessageSchema(MessageImpl msg, SendCallback callback) { + @VisibleForTesting + boolean populateMessageSchema(MessageImpl msg, SendCallback callback) { MessageMetadata msgMetadataBuilder = msg.getMessageBuilder(); if (msg.getSchemaInternal() == schema) { schemaVersion.ifPresent(v -> msgMetadataBuilder.setSchemaVersion(v)); msg.setSchemaState(MessageImpl.SchemaState.Ready); return true; } + // If the message is from the replicator and without replicated schema + // Which means the message is written 
with BYTES schema + // So we don't need to replicate schema to the remote cluster + if (msg.hasReplicateFrom() && msg.getSchemaInfoForReplicator() == null) { + msg.setSchemaState(MessageImpl.SchemaState.Ready); + return true; + } + if (!isMultiSchemaEnabled(true)) { PulsarClientException.InvalidMessageException e = new PulsarClientException.InvalidMessageException( format("The producer %s of the topic %s is disabled the `MultiSchema`", producerName, topic) @@ -593,8 +649,7 @@ private boolean populateMessageSchema(MessageImpl msg, SendCallback callback) { completeCallbackAndReleaseSemaphore(msg.getUncompressedSize(), callback, e); return false; } - SchemaHash schemaHash = SchemaHash.of(msg.getSchemaInternal()); - byte[] schemaVersion = schemaCache.get(schemaHash); + byte[] schemaVersion = schemaCache.get(msg.getSchemaHash()); if (schemaVersion != null) { msgMetadataBuilder.setSchemaVersion(schemaVersion); msg.setSchemaState(MessageImpl.SchemaState.Ready); @@ -603,8 +658,7 @@ private boolean populateMessageSchema(MessageImpl msg, SendCallback callback) { } private boolean rePopulateMessageSchema(MessageImpl msg) { - SchemaHash schemaHash = SchemaHash.of(msg.getSchemaInternal()); - byte[] schemaVersion = schemaCache.get(schemaHash); + byte[] schemaVersion = schemaCache.get(msg.getSchemaHash()); if (schemaVersion == null) { return false; } @@ -613,7 +667,7 @@ private boolean rePopulateMessageSchema(MessageImpl msg) { return true; } - private void tryRegisterSchema(ClientCnx cnx, MessageImpl msg, SendCallback callback) { + private void tryRegisterSchema(ClientCnx cnx, MessageImpl msg, SendCallback callback, long expectedCnxEpoch) { if (!changeToRegisteringSchemaState()) { return; } @@ -630,15 +684,19 @@ private void tryRegisterSchema(ClientCnx cnx, MessageImpl msg, SendCallback call callback.sendComplete((PulsarClientException.IncompatibleSchemaException) t); } } else { - log.warn("[{}] [{}] GetOrCreateSchema succeed", topic, producerName); - SchemaHash schemaHash = 
SchemaHash.of(msg.getSchemaInternal()); - schemaCache.putIfAbsent(schemaHash, v); - msg.getMessageBuilder().setSchemaVersion(v); + log.info("[{}] [{}] GetOrCreateSchema succeed", topic, producerName); + // In broker, if schema version is an empty byte array, it means the topic doesn't have schema. In this + // case, we should not cache the schema version so that the schema version of the message metadata will + // be null, instead of an empty array. + if (v.length != 0) { + schemaCache.putIfAbsent(msg.getSchemaHash(), v); + msg.getMessageBuilder().setSchemaVersion(v); + } msg.setSchemaState(MessageImpl.SchemaState.Ready); } cnx.ctx().channel().eventLoop().execute(() -> { synchronized (ProducerImpl.this) { - recoverProcessOpSendMsgFrom(cnx, msg); + recoverProcessOpSendMsgFrom(cnx, msg, expectedCnxEpoch); } }); return null; @@ -841,6 +899,31 @@ protected WriteInEventLoopCallback newObject(Handle ha }; } + private static final class LastSendFutureWrapper { + private final CompletableFuture lastSendFuture; + private static final int FALSE = 0; + private static final int TRUE = 1; + private static final AtomicIntegerFieldUpdater THROW_ONCE_UPDATER = + AtomicIntegerFieldUpdater.newUpdater(LastSendFutureWrapper.class, "throwOnce"); + private volatile int throwOnce = FALSE; + + private LastSendFutureWrapper(CompletableFuture lastSendFuture) { + this.lastSendFuture = lastSendFuture; + } + static LastSendFutureWrapper create(CompletableFuture lastSendFuture) { + return new LastSendFutureWrapper(lastSendFuture); + } + public CompletableFuture handleOnce() { + return lastSendFuture.handle((ignore, t) -> { + if (t != null && THROW_ONCE_UPDATER.compareAndSet(this, FALSE, TRUE)) { + throw FutureUtil.wrapToCompletionException(t); + } + return null; + }); + } + } + + @Override public CompletableFuture closeAsync() { final State currentState = getAndUpdateState(state -> { @@ -854,23 +937,7 @@ public CompletableFuture closeAsync() { return CompletableFuture.completedFuture(null); } - 
Timeout timeout = sendTimeout; - if (timeout != null) { - timeout.cancel(); - sendTimeout = null; - } - - ScheduledFuture batchTimerTask = this.batchTimerTask; - if (batchTimerTask != null) { - batchTimerTask.cancel(false); - this.batchTimerTask = null; - } - - if (keyGeneratorTask != null && !keyGeneratorTask.isCancelled()) { - keyGeneratorTask.cancel(false); - } - - stats.cancelStatsTimeout(); + closeProducerTasks(); ClientCnx cnx = cnx(); if (cnx == null || currentState != State.Ready) { @@ -913,7 +980,22 @@ private synchronized void closeAndClearPendingMessages() { @Override public boolean isConnected() { - return connectionHandler.cnx() != null && (getState() == State.Ready); + return getCnxIfReady() != null; + } + + /** + * Hook method for testing. By returning null, it's possible to prevent messages + * being delivered to the broker. + * + * @return cnx if OpSend messages should be written to open connection. Caller must + * verify that the returned cnx is not null before using reference. + */ + protected ClientCnx getCnxIfReady() { + if (getState() == State.Ready) { + return connectionHandler.cnx(); + } else { + return null; + } } @Override @@ -952,7 +1034,7 @@ void ackReceived(ClientCnx cnx, long sequenceId, long highestSequenceId, long le if (sequenceId > op.sequenceId) { log.warn("[{}] [{}] Got ack for msg. expecting: {} - {} - got: {} - {} - queue-size: {}", topic, producerName, - op.sequenceId, op.highestSequenceId, sequenceId, highestSequenceId, pendingMessages.size()); + op.sequenceId, op.highestSequenceId, sequenceId, highestSequenceId, pendingMessages.messagesCount()); // Force connection closing so that messages can be re-transmitted in a new connection cnx.channel().close(); return; @@ -974,7 +1056,7 @@ void ackReceived(ClientCnx cnx, long sequenceId, long highestSequenceId, long le releaseSemaphoreForSendOp(op); } else { log.warn("[{}] [{}] Got ack for batch msg error. 
expecting: {} - {} - got: {} - {} - queue-size: {}", topic, producerName, - op.sequenceId, op.highestSequenceId, sequenceId, highestSequenceId, pendingMessages.size()); + op.sequenceId, op.highestSequenceId, sequenceId, highestSequenceId, pendingMessages.messagesCount()); // Force connection closing so that messages can be re-transmitted in a new connection cnx.channel().close(); return; @@ -1005,9 +1087,9 @@ private long getHighestSequenceId(OpSendMsg op) { } private void releaseSemaphoreForSendOp(OpSendMsg op) { - if (semaphore.isPresent()) { - semaphore.get().release(isBatchMessagingEnabled() ? op.numMessagesInBatch : 1); - } + + semaphoreRelease(isBatchMessagingEnabled() ? op.numMessagesInBatch : 1); + client.getMemoryLimitController().releaseMemory(op.uncompressedSize); } @@ -1070,19 +1152,17 @@ protected synchronized void recoverChecksumError(ClientCnx cnx, long sequenceId) } } // as msg is not corrupted : let producer resend pending-messages again including checksum failed message - resendMessages(cnx); + resendMessages(cnx, this.connectionHandler.getEpoch()); } - protected synchronized void recoverNotAllowedError(long sequenceId) { + protected synchronized void recoverNotAllowedError(long sequenceId, String errorMsg) { OpSendMsg op = pendingMessages.peek(); if(op != null && sequenceId == getHighestSequenceId(op)){ pendingMessages.remove(); releaseSemaphoreForSendOp(op); try { op.sendComplete( - new PulsarClientException.NotAllowedException( - format("The size of the message which is produced by producer %s to the topic " + - "%s is not allowed", producerName, topic))); + new PulsarClientException.NotAllowedException(errorMsg)); } catch (Throwable t) { log.warn("[{}] [{}] Got exception while completing the callback for msg {}:", topic, producerName, sequenceId, t); @@ -1208,11 +1288,12 @@ void sendComplete(final Exception e) { long sequenceId = te.getSequenceId(); long ns = System.nanoTime(); String errMsg = String.format( - "%s : createdAt %s ns ago, 
firstSentAt %s ns ago, lastSentAt %s ns ago, retryCount %s", + "%s : createdAt %s seconds ago, firstSentAt %s seconds ago, lastSentAt %s seconds ago, " + + "retryCount %s", te.getMessage(), - ns - this.createdAt, - ns - this.firstSentAt, - ns - this.lastSentAt, + RelativeTimeUtil.nsToSeconds(ns - this.createdAt), + RelativeTimeUtil.nsToSeconds(this.firstSentAt <= 0 ? ns - this.lastSentAt : ns - this.firstSentAt), + RelativeTimeUtil.nsToSeconds(ns - this.lastSentAt), retryCount ); @@ -1275,6 +1356,7 @@ protected OpSendMsg newObject(Handle handle) { }; } + /** * Queue implementation that is used as the pending messages queue. * @@ -1289,6 +1371,7 @@ protected static class OpSendMsgQueue implements Iterable { private final Queue delegate = new ArrayDeque<>(); private int forEachDepth = 0; private List postponedOpSendMgs; + private final AtomicInteger messagesCount = new AtomicInteger(0); @Override public void forEach(Consumer action) { @@ -1309,6 +1392,7 @@ public void forEach(Consumer action) { public boolean add(OpSendMsg o) { // postpone adding to the queue while forEach iteration is in progress + messagesCount.addAndGet(o.numMessagesInBatch); if (forEachDepth > 0) { if (postponedOpSendMgs == null) { postponedOpSendMgs = new ArrayList<>(); @@ -1321,18 +1405,22 @@ public boolean add(OpSendMsg o) { public void clear() { delegate.clear(); + messagesCount.set(0); } public void remove() { - delegate.remove(); + OpSendMsg op = delegate.remove(); + if (op != null) { + messagesCount.addAndGet(-op.numMessagesInBatch); + } } public OpSendMsg peek() { return delegate.peek(); } - public int size() { - return delegate.size(); + public int messagesCount() { + return messagesCount.get(); } @Override @@ -1345,9 +1433,17 @@ public Iterator iterator() { public void connectionOpened(final ClientCnx cnx) { previousExceptions.clear(); - // we set the cnx reference before registering the producer on the cnx, so if the cnx breaks before creating the - // producer, it will try to grab a 
new cnx - connectionHandler.setClientCnx(cnx); + final long epoch; + synchronized (this) { + // Because the state could have been updated while retrieving the connection, we set it back to connecting, + // as long as the change from current state to connecting is a valid state change. + if (!changeToConnecting()) { + return; + } + // We set the cnx reference before registering the producer on the cnx, so if the cnx breaks before creating + // the producer, it will try to grab a new cnx. We also increment and get the epoch value for the producer. + epoch = connectionHandler.switchClientCnx(cnx); + } cnx.registerProducer(producerId, this); log.info("[{}] [{}] Creating producer on cnx {}", topic, producerName, cnx.ctx().channel()); @@ -1384,7 +1480,7 @@ public void connectionOpened(final ClientCnx cnx) { cnx.sendRequestWithId( Commands.newProducer(topic, producerId, requestId, producerName, conf.isEncryptionEnabled(), metadata, - schemaInfo, connectionHandler.getEpoch(), userProvidedProducerName, + schemaInfo, epoch, userProvidedProducerName, conf.getAccessMode(), topicEpoch, client.conf.isEnableTransaction()), requestId).thenAccept(response -> { String producerName = response.getProducerName(); @@ -1427,26 +1523,13 @@ public void connectionOpened(final ClientCnx cnx) { if (!producerCreatedFuture.isDone() && isBatchMessagingEnabled()) { // schedule the first batch message task - batchTimerTask = cnx.ctx().executor().scheduleAtFixedRate(() -> { - if (log.isTraceEnabled()) { - log.trace( - "[{}] [{}] Batching the messages from the batch container from timer thread", - topic, - producerName); - } - // semaphore acquired when message was enqueued to container - synchronized (ProducerImpl.this) { - // If it's closing/closed we need to ignore the send batch timer and not - // schedule next timeout. 
- if (getState() == State.Closing || getState() == State.Closed) { - return; - } - - batchMessageAndSend(); - } - }, 0, conf.getBatchingMaxPublishDelayMicros(), TimeUnit.MICROSECONDS); + Timeout task = client.timer() + .newTimeout(this::triggerBatchMessageAndSend, + conf.getBatchingMaxPublishDelayMicros(), TimeUnit.MICROSECONDS); + + batchTimerTask.set(task); } - resendMessages(cnx); + resendMessages(cnx, epoch); } }).exceptionally((e) -> { Throwable cause = e.getCause(); @@ -1457,7 +1540,27 @@ public void connectionOpened(final ClientCnx cnx) { cnx.channel().close(); return null; } + + if (cause instanceof TimeoutException) { + // Creating the producer has timed out. We need to ensure the broker closes the producer + // in case it was indeed created, otherwise it might prevent new create producer operation, + // since we are not necessarily closing the connection. + long closeRequestId = client.newRequestId(); + ByteBuf cmd = Commands.newCloseProducer(producerId, closeRequestId); + cnx.sendRequestWithId(cmd, closeRequestId); + } + log.error("[{}] [{}] Failed to create producer: {}", topic, producerName, cause.getMessage()); + // Close the producer since topic does not exist. + if (cause instanceof PulsarClientException.ProducerFencedException) { + if (log.isDebugEnabled()) { + log.debug("[{}] [{}] Failed to create producer: {}", + topic, producerName, cause.getMessage()); + } + } else { + log.error("[{}] [{}] Failed to create producer: {}", topic, producerName, cause.getMessage()); + } + // Close the producer since topic does not exists. 
if (cause instanceof PulsarClientException.TopicDoesNotExistException) { closeAsync().whenComplete((v, ex) -> { @@ -1475,7 +1578,7 @@ public void connectionOpened(final ClientCnx cnx) { if (log.isDebugEnabled()) { log.debug("[{}] [{}] Pending messages: {}", topic, producerName, - pendingMessages.size()); + pendingMessages.messagesCount()); } PulsarClientException bqe = new PulsarClientException.ProducerBlockedQuotaExceededException( @@ -1494,6 +1597,7 @@ public void connectionOpened(final ClientCnx cnx) { failPendingMessages(cnx(), (PulsarClientException) cause); } producerCreatedFuture.completeExceptionally(cause); + closeProducerTasks(); client.cleanupProducer(this); } else if (cause instanceof PulsarClientException.ProducerFencedException) { setState(State.ProducerFenced); @@ -1501,6 +1605,7 @@ public void connectionOpened(final ClientCnx cnx) { failPendingMessages(cnx(), (PulsarClientException) cause); } producerCreatedFuture.completeExceptionally(cause); + closeProducerTasks(); client.cleanupProducer(this); } else if (producerCreatedFuture.isDone() || // (cause instanceof PulsarClientException && PulsarClientException.isRetriableError(cause) @@ -1511,6 +1616,7 @@ public void connectionOpened(final ClientCnx cnx) { } else { setState(State.Failed); producerCreatedFuture.completeExceptionally(cause); + closeProducerTasks(); client.cleanupProducer(this); Timeout timeout = sendTimeout; if (timeout != null) { @@ -1523,6 +1629,31 @@ public void connectionOpened(final ClientCnx cnx) { }); } + private void triggerBatchMessageAndSend(Timeout timeout) { + client.getInternalExecutorService().execute(catchingAndLoggingThrowables(() -> { + if (log.isTraceEnabled()) { + log.trace("[{}] [{}] Batching the messages from the batch container from " + "timer thread", topic, producerName); + } + // semaphore acquired when message was enqueued to container + synchronized (ProducerImpl.this) { + // If it's closing/closed we need to ignore the send batch timer and not + // schedule 
next timeout. + if (getState() == State.Closing || getState() == State.Closed) { + return; + } + + batchMessageAndSend(); + } + + Timeout task = client.timer() + .newTimeout(this::triggerBatchMessageAndSend, + conf.getBatchingMaxPublishDelayMicros(), TimeUnit.MICROSECONDS); + + batchTimerTask.set(task); + + })); + } + @Override public void connectionFailed(PulsarClientException exception) { boolean nonRetriableError = !PulsarClientException.isRetriableError(exception); @@ -1535,6 +1666,7 @@ public void connectionFailed(PulsarClientException exception) { } else { log.info("[{}] Producer creation failed for producer {} after producerTimeout", topic, producerId); } + closeProducerTasks(); setState(State.Failed); client.cleanupProducer(this); } @@ -1543,7 +1675,28 @@ public void connectionFailed(PulsarClientException exception) { } } - private void resendMessages(ClientCnx cnx) { + private void closeProducerTasks() { + Timeout timeout = sendTimeout; + if (timeout != null) { + timeout.cancel(); + sendTimeout = null; + } + + if (batchTimerTask != null) { + Timeout batchTimerTask = this.batchTimerTask.getAndSet(null); + if (batchTimerTask != null) { + batchTimerTask.cancel(); + } + } + + if (keyGeneratorTask != null && !keyGeneratorTask.isCancelled()) { + keyGeneratorTask.cancel(false); + } + + stats.cancelStatsTimeout(); + } + + private void resendMessages(ClientCnx cnx, long expectedEpoch) { cnx.ctx().channel().eventLoop().execute(() -> { synchronized (this) { if (getState() == State.Closing || getState() == State.Closed) { @@ -1552,7 +1705,7 @@ private void resendMessages(ClientCnx cnx) { cnx.channel().close(); return; } - int messagesToResend = pendingMessages.size(); + int messagesToResend = pendingMessages.messagesCount(); if (messagesToResend == 0) { if (log.isDebugEnabled()) { log.debug("[{}] [{}] No pending messages to resend {}", topic, producerName, messagesToResend); @@ -1570,7 +1723,7 @@ private void resendMessages(ClientCnx cnx) { } log.info("[{}] [{}] 
Re-Sending {} messages to server", topic, producerName, messagesToResend); - recoverProcessOpSendMsgFrom(cnx, null); + recoverProcessOpSendMsgFrom(cnx, null, expectedEpoch); } }); } @@ -1658,7 +1811,7 @@ public void run(Timeout timeout) throws Exception { // The diff is less than or equal to zero, meaning that the message has been timed out. // Set the callback to timeout on every message, then clear the pending queue. log.info("[{}] [{}] Message send timed out. Failing {} messages", topic, producerName, - pendingMessages.size()); + pendingMessages.messagesCount()); PulsarClientException te = new PulsarClientException.TimeoutException( format("The producer %s can not send message to the topic %s within given timeout", @@ -1706,7 +1859,7 @@ private void failPendingMessages(ClientCnx cnx, PulsarClientException ex) { }); pendingMessages.clear(); - semaphore.ifPresent(s -> s.release(releaseCount.get())); + semaphoreRelease(releaseCount.get()); if (batchMessagingEnabled) { failPendingBatchMessages(ex); } @@ -1731,20 +1884,25 @@ private void failPendingBatchMessages(PulsarClientException ex) { return; } final int numMessagesInBatch = batchMessageContainer.getNumMessagesInBatch(); + final long currentBatchSize = batchMessageContainer.getCurrentBatchSize(); + semaphoreRelease(numMessagesInBatch); + client.getMemoryLimitController().releaseMemory(currentBatchSize); batchMessageContainer.discard(ex); - semaphore.ifPresent(s -> s.release(numMessagesInBatch)); } @Override public CompletableFuture flushAsync() { - CompletableFuture lastSendFuture; synchronized (ProducerImpl.this) { if (isBatchMessagingEnabled()) { batchMessageAndSend(); } - lastSendFuture = this.lastSendFuture; + CompletableFuture lastSendFuture = this.lastSendFuture; + if (!(lastSendFuture == this.lastSendFutureWrapper.lastSendFuture)) { + this.lastSendFutureWrapper = LastSendFutureWrapper.create(lastSendFuture); + } } - return lastSendFuture.thenApply(ignored -> null); + + return 
this.lastSendFutureWrapper.handleOnce(); } @Override @@ -1774,11 +1932,7 @@ private void batchMessageAndSend() { for (OpSendMsg opSendMsg : opSendMsgs) { processOpSendMsg(opSendMsg); } - } catch (PulsarClientException e) { - Thread.currentThread().interrupt(); - semaphore.ifPresent(s -> s.release(batchMessageContainer.getNumMessagesInBatch())); } catch (Throwable t) { - semaphore.ifPresent(s -> s.release(batchMessageContainer.getNumMessagesInBatch())); log.warn("[{}] [{}] error while create opSendMsg by batch message container", topic, producerName, t); } } @@ -1797,10 +1951,11 @@ protected void processOpSendMsg(OpSendMsg op) { LAST_SEQ_ID_PUSHED_UPDATER.getAndUpdate(this, last -> Math.max(last, getHighestSequenceId(op))); } - if (shouldWriteOpSendMsg()) { - ClientCnx cnx = cnx(); + + final ClientCnx cnx = getCnxIfReady(); + if (cnx != null) { if (op.msg != null && op.msg.getSchemaState() == None) { - tryRegisterSchema(cnx, op.msg, op.callback); + tryRegisterSchema(cnx, op.msg, op.callback, this.connectionHandler.getEpoch()); return; } // If we do have a connection, the message is sent immediately, otherwise we'll try again once a new @@ -1821,17 +1976,16 @@ protected void processOpSendMsg(OpSendMsg op) { } } - /** - * Hook method for testing. By returning false, it's possible to prevent messages - * being delivered to the broker. - * - * @return true if OpSend messages should be written to open connection - */ - protected boolean shouldWriteOpSendMsg() { - return isConnected(); - } - - private void recoverProcessOpSendMsgFrom(ClientCnx cnx, MessageImpl from) { + // Must acquire a lock on ProducerImpl.this before calling method. + private void recoverProcessOpSendMsgFrom(ClientCnx cnx, MessageImpl from, long expectedEpoch) { + if (expectedEpoch != this.connectionHandler.getEpoch() || cnx() == null) { + // In this case, the cnx passed to this method is no longer the active connection. 
This method will get + // called again once the new connection registers the producer with the broker. + log.info("[{}][{}] Producer epoch mismatch or the current connection is null. Skip re-sending the " + + " {} pending messages since they will deliver using another connection.", topic, producerName, + pendingMessages.messagesCount()); + return; + } final boolean stripChecksum = cnx.getRemoteEndpointProtocolVersion() < brokerChecksumSupportedVersion(); Iterator msgIterator = pendingMessages.iterator(); OpSendMsg pendingRegisteringOp = null; @@ -1880,7 +2034,7 @@ private void recoverProcessOpSendMsgFrom(ClientCnx cnx, MessageImpl from) { return; } if (pendingRegisteringOp != null) { - tryRegisterSchema(cnx, pendingRegisteringOp.msg, pendingRegisteringOp.callback); + tryRegisterSchema(cnx, pendingRegisteringOp.msg, pendingRegisteringOp.callback, expectedEpoch); } } @@ -1901,7 +2055,7 @@ public String getConnectedSince() { } public int getPendingQueueSize() { - return pendingMessages.size(); + return pendingMessages.messagesCount(); } @Override @@ -1948,5 +2102,16 @@ Optional getSemaphore() { return semaphore; } + @VisibleForTesting + boolean isErrorStat() { + return errorState; + } + + @VisibleForTesting + CompletableFuture getOriginalLastSendFuture() { + CompletableFuture lastSendFuture = this.lastSendFuture; + return lastSendFuture.thenApply(ignore -> null); + } + private static final Logger log = LoggerFactory.getLogger(ProducerImpl.class); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerResponse.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerResponse.java index 36b47f2b6d62f..2c9cfa74d1aee 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerResponse.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerResponse.java @@ -21,9 +21,9 @@ import java.util.Optional; import lombok.AllArgsConstructor; -import lombok.Data; +import lombok.Getter; -@Data +@Getter 
@AllArgsConstructor public class ProducerResponse { private String producerName; @@ -31,4 +31,14 @@ public class ProducerResponse { private byte[] schemaVersion; private Optional topicEpoch; + + // Shadow the default getter generated by lombok. In broker, if the schema version is an empty byte array, it means + // the topic doesn't have schema. + public byte[] getSchemaVersion() { + if (schemaVersion != null && schemaVersion.length != 0) { + return schemaVersion; + } else { + return null; + } + } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImpl.java index faf73cb3e2788..3acefa312807d 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImpl.java @@ -99,7 +99,7 @@ private void init(ProducerConfigurationData conf) { try { log.info("Starting Pulsar producer perf with config: {}", w.writeValueAsString(conf)); - log.info("Pulsar client config: {}", w.withoutAttribute("authentication").writeValueAsString(pulsarClient.getConfiguration())); + log.info("Pulsar client config: {}", w.writeValueAsString(pulsarClient.getConfiguration())); } catch (IOException e) { log.error("Failed to dump config info", e); } @@ -111,49 +111,7 @@ private void init(ProducerConfigurationData conf) { } try { - long now = System.nanoTime(); - double elapsed = (now - oldTime) / 1e9; - oldTime = now; - - long currentNumMsgsSent = numMsgsSent.sumThenReset(); - long currentNumBytesSent = numBytesSent.sumThenReset(); - long currentNumSendFailedMsgs = numSendFailed.sumThenReset(); - long currentNumAcksReceived = numAcksReceived.sumThenReset(); - - totalMsgsSent.add(currentNumMsgsSent); - totalBytesSent.add(currentNumBytesSent); - totalSendFailed.add(currentNumSendFailedMsgs); - totalAcksReceived.add(currentNumAcksReceived); - - 
synchronized (ds) { - latencyPctValues = ds.getQuantiles(PERCENTILES); - ds.reset(); - } - - sendMsgsRate = currentNumMsgsSent / elapsed; - sendBytesRate = currentNumBytesSent / elapsed; - - if ((currentNumMsgsSent | currentNumSendFailedMsgs | currentNumAcksReceived - | currentNumMsgsSent) != 0) { - - for (int i = 0; i < latencyPctValues.length; i++) { - if (Double.isNaN(latencyPctValues[i])) { - latencyPctValues[i] = 0; - } - } - - log.info("[{}] [{}] Pending messages: {} --- Publish throughput: {} msg/s --- {} Mbit/s --- " - + "Latency: med: {} ms - 95pct: {} ms - 99pct: {} ms - 99.9pct: {} ms - max: {} ms --- " - + "Ack received rate: {} ack/s --- Failed messages: {}", producer.getTopic(), - producer.getProducerName(), producer.getPendingQueueSize(), - THROUGHPUT_FORMAT.format(sendMsgsRate), - THROUGHPUT_FORMAT.format(sendBytesRate / 1024 / 1024 * 8), - DEC.format(latencyPctValues[0]), DEC.format(latencyPctValues[2]), - DEC.format(latencyPctValues[3]), DEC.format(latencyPctValues[4]), - DEC.format(latencyPctValues[5]), - THROUGHPUT_FORMAT.format(currentNumAcksReceived / elapsed), currentNumSendFailedMsgs); - } - + updateStats(); } catch (Exception e) { log.error("[{}] [{}]: {}", producer.getTopic(), producer.getProducerName(), e.getMessage()); } finally { @@ -171,6 +129,52 @@ Timeout getStatTimeout() { return statTimeout; } + protected void updateStats() { + long now = System.nanoTime(); + double elapsed = (now - oldTime) / 1e9; + oldTime = now; + + long currentNumMsgsSent = numMsgsSent.sumThenReset(); + long currentNumBytesSent = numBytesSent.sumThenReset(); + long currentNumSendFailedMsgs = numSendFailed.sumThenReset(); + long currentNumAcksReceived = numAcksReceived.sumThenReset(); + + totalMsgsSent.add(currentNumMsgsSent); + totalBytesSent.add(currentNumBytesSent); + totalSendFailed.add(currentNumSendFailedMsgs); + totalAcksReceived.add(currentNumAcksReceived); + + synchronized (ds) { + latencyPctValues = ds.getQuantiles(PERCENTILES); + ds.reset(); + } + + 
sendMsgsRate = currentNumMsgsSent / elapsed; + sendBytesRate = currentNumBytesSent / elapsed; + + if ((currentNumMsgsSent | currentNumSendFailedMsgs | currentNumAcksReceived + | currentNumMsgsSent) != 0) { + + for (int i = 0; i < latencyPctValues.length; i++) { + if (Double.isNaN(latencyPctValues[i])) { + latencyPctValues[i] = 0; + } + } + + log.info("[{}] [{}] Pending messages: {} --- Publish throughput: {} msg/s --- {} Mbit/s --- " + + "Latency: med: {} ms - 95pct: {} ms - 99pct: {} ms - 99.9pct: {} ms - max: {} ms --- " + + "Ack received rate: {} ack/s --- Failed messages: {} --- Pending messages: {}", + producer.getTopic(), producer.getProducerName(), producer.getPendingQueueSize(), + THROUGHPUT_FORMAT.format(sendMsgsRate), + THROUGHPUT_FORMAT.format(sendBytesRate / 1024 / 1024 * 8), + DEC.format(latencyPctValues[0]), DEC.format(latencyPctValues[2]), + DEC.format(latencyPctValues[3]), DEC.format(latencyPctValues[4]), + DEC.format(latencyPctValues[5]), + THROUGHPUT_FORMAT.format(currentNumAcksReceived / elapsed), currentNumSendFailedMsgs, + producer.getPendingQueueSize()); + } + } + @Override public void updateNumMsgsSent(long numMsgs, long totalMsgsSize) { numMsgsSent.add(numMsgs); @@ -297,6 +301,7 @@ public double getSendLatencyMillisMax() { } public void cancelStatsTimeout() { + this.updateStats(); if (statTimeout != null) { statTimeout.cancel(); statTimeout = null; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarChannelInitializer.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarChannelInitializer.java index 1353424b1b4ea..bac1cd9ba419f 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarChannelInitializer.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarChannelInitializer.java @@ -18,14 +18,24 @@ */ package org.apache.pulsar.client.impl; +import io.netty.channel.Channel; +import io.netty.channel.ChannelInitializer; +import 
io.netty.channel.socket.SocketChannel; +import io.netty.handler.codec.LengthFieldBasedFrameDecoder; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslHandler; +import io.netty.handler.ssl.SslProvider; import java.net.InetSocketAddress; import java.util.Objects; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; - import io.netty.handler.proxy.Socks5ProxyHandler; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.client.api.AuthenticationDataProvider; +import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; import org.apache.pulsar.client.util.ObjectCache; import org.apache.pulsar.common.protocol.ByteBufPair; @@ -33,15 +43,6 @@ import org.apache.pulsar.common.util.SecurityUtility; import org.apache.pulsar.common.util.keystoretls.NettySSLContextAutoRefreshBuilder; -import io.netty.channel.Channel; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.socket.SocketChannel; -import io.netty.handler.codec.LengthFieldBasedFrameDecoder; -import io.netty.handler.ssl.SslContext; -import io.netty.handler.ssl.SslHandler; -import lombok.Getter; -import lombok.extern.slf4j.Slf4j; - @Slf4j public class PulsarChannelInitializer extends ChannelInitializer { @@ -50,6 +51,7 @@ public class PulsarChannelInitializer extends ChannelInitializer private final Supplier clientCnxSupplier; @Getter private final boolean tlsEnabled; + private final boolean tlsHostnameVerificationEnabled; private final boolean tlsEnabledWithKeyStore; private final InetSocketAddress socks5ProxyAddress; private final String socks5ProxyUsername; @@ -65,6 +67,7 @@ public PulsarChannelInitializer(ClientConfigurationData conf, Supplier(() -> { try { + SslProvider sslProvider = null; + if (conf.getSslProvider() != null) { + sslProvider = 
SslProvider.valueOf(conf.getSslProvider()); + } + // Set client certificate if available AuthenticationDataProvider authData = conf.getAuthentication().getAuthData(); if (authData.hasDataForTls()) { return authData.getTlsTrustStoreStream() == null - ? SecurityUtility.createNettySslContextForClient(conf.isTlsAllowInsecureConnection(), - conf.getTlsTrustCertsFilePath(), - authData.getTlsCertificates(), authData.getTlsPrivateKey()) - : SecurityUtility.createNettySslContextForClient(conf.isTlsAllowInsecureConnection(), - authData.getTlsTrustStoreStream(), - authData.getTlsCertificates(), authData.getTlsPrivateKey()); + ? SecurityUtility.createNettySslContextForClient( + sslProvider, + conf.isTlsAllowInsecureConnection(), + conf.getTlsTrustCertsFilePath(), + authData.getTlsCertificates(), + authData.getTlsPrivateKey(), + conf.getTlsCiphers(), + conf.getTlsProtocols()) + : SecurityUtility.createNettySslContextForClient(sslProvider, + conf.isTlsAllowInsecureConnection(), + authData.getTlsTrustStoreStream(), + authData.getTlsCertificates(), authData.getTlsPrivateKey(), + conf.getTlsCiphers(), + conf.getTlsProtocols()); } else { - return SecurityUtility.createNettySslContextForClient(conf.isTlsAllowInsecureConnection(), - conf.getTlsTrustCertsFilePath()); + return SecurityUtility.createNettySslContextForClient( + sslProvider, + conf.isTlsAllowInsecureConnection(), + conf.getTlsTrustCertsFilePath(), + conf.getTlsCiphers(), + conf.getTlsProtocols()); } } catch (Exception e) { throw new RuntimeException("Failed to create TLS context", e); @@ -146,6 +169,11 @@ CompletableFuture initTls(Channel ch, InetSocketAddress sniHost) { ? 
new SslHandler(nettySSLContextAutoRefreshBuilder.get() .createSSLEngine(sniHost.getHostString(), sniHost.getPort())) : sslContextSupplier.get().newHandler(ch.alloc(), sniHost.getHostString(), sniHost.getPort()); + + if (tlsHostnameVerificationEnabled) { + SecurityUtility.configureSSLHandler(handler); + } + ch.pipeline().addFirst(TLS_HANDLER, handler); initTlsFuture.complete(ch); } catch (Throwable t) { diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImpl.java index 1234b8bbf3791..9a4bada327899 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImpl.java @@ -19,18 +19,16 @@ package org.apache.pulsar.client.impl; import static org.apache.commons.lang3.StringUtils.isBlank; - import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import com.google.common.collect.Lists; - import io.netty.channel.EventLoopGroup; import io.netty.util.HashedWheelTimer; import io.netty.util.Timer; import io.netty.util.concurrent.DefaultThreadFactory; - +import java.net.InetSocketAddress; import java.time.Clock; import java.util.ArrayList; import java.util.Collections; @@ -49,7 +47,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Pattern; import java.util.stream.Collectors; - +import lombok.Builder; import lombok.Getter; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.client.api.AuthenticationFactory; @@ -77,6 +75,7 @@ import org.apache.pulsar.client.impl.transaction.TransactionBuilderImpl; import org.apache.pulsar.client.impl.transaction.TransactionCoordinatorClientImpl; import org.apache.pulsar.client.util.ExecutorProvider; +import 
org.apache.pulsar.client.util.ScheduledExecutorProvider; import org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespace.Mode; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicDomain; @@ -93,12 +92,16 @@ public class PulsarClientImpl implements PulsarClient { private static final Logger log = LoggerFactory.getLogger(PulsarClientImpl.class); protected final ClientConfigurationData conf; + private final boolean createdExecutorProviders; private LookupService lookup; private final ConnectionPool cnxPool; + @Getter private final Timer timer; private boolean needStopTimer; private final ExecutorProvider externalExecutorProvider; - private final ExecutorProvider internalExecutorService; + private final ExecutorProvider internalExecutorProvider; + + private final ScheduledExecutorProvider scheduledExecutorProvider; private final boolean createdEventLoopGroup; private final boolean createdCnxPool; @@ -113,20 +116,22 @@ public enum State { private final AtomicLong producerIdGenerator = new AtomicLong(); private final AtomicLong consumerIdGenerator = new AtomicLong(); - private final AtomicLong requestIdGenerator - = new AtomicLong(ThreadLocalRandom.current().nextLong(0, Long.MAX_VALUE/2)); + private final AtomicLong requestIdGenerator = + new AtomicLong(ThreadLocalRandom.current().nextLong(0, Long.MAX_VALUE / 2)); protected final EventLoopGroup eventLoopGroup; private final MemoryLimitController memoryLimitController; - private final LoadingCache schemaProviderLoadingCache = CacheBuilder.newBuilder().maximumSize(100000) - .expireAfterAccess(30, TimeUnit.MINUTES).build(new CacheLoader() { + private final LoadingCache schemaProviderLoadingCache = + CacheBuilder.newBuilder().maximumSize(100000) + .expireAfterAccess(30, TimeUnit.MINUTES) + .build(new CacheLoader() { - @Override - public SchemaInfoProvider load(String topicName) { - return newSchemaProvider(topicName); - } - }); + @Override + public SchemaInfoProvider 
load(String topicName) { + return newSchemaProvider(topicName); + } + }); private final Clock clientClock; @@ -134,48 +139,61 @@ public SchemaInfoProvider load(String topicName) { private TransactionCoordinatorClientImpl tcClient; public PulsarClientImpl(ClientConfigurationData conf) throws PulsarClientException { - this(conf, getEventLoopGroup(conf), true); + this(conf, null, null, null, null, null); } public PulsarClientImpl(ClientConfigurationData conf, EventLoopGroup eventLoopGroup) throws PulsarClientException { - this(conf, eventLoopGroup, new ConnectionPool(conf, eventLoopGroup), null, false, true); + this(conf, eventLoopGroup, null, null, null, null); } public PulsarClientImpl(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, ConnectionPool cnxPool) throws PulsarClientException { - this(conf, eventLoopGroup, cnxPool, null, false, false); - } - - public PulsarClientImpl(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, ConnectionPool cnxPool, Timer timer) - throws PulsarClientException { - this(conf, eventLoopGroup, cnxPool, timer, false, false); + this(conf, eventLoopGroup, cnxPool, null, null, null); } - private PulsarClientImpl(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, boolean createdEventLoopGroup) + public PulsarClientImpl(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, ConnectionPool cnxPool, + Timer timer) throws PulsarClientException { - this(conf, eventLoopGroup, new ConnectionPool(conf, eventLoopGroup), null, createdEventLoopGroup, true); + this(conf, eventLoopGroup, cnxPool, timer, null, null); } - private PulsarClientImpl(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, ConnectionPool cnxPool, Timer timer, - boolean createdEventLoopGroup, boolean createdCnxPool) throws PulsarClientException { + @Builder(builderClassName = "PulsarClientImplBuilder") + private PulsarClientImpl(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, ConnectionPool connectionPool, + Timer timer, 
ExecutorProvider externalExecutorProvider, + ExecutorProvider internalExecutorProvider) throws PulsarClientException { + EventLoopGroup eventLoopGroupReference = null; + ConnectionPool connectionPoolReference = null; try { - this.createdEventLoopGroup = createdEventLoopGroup; - this.createdCnxPool = createdCnxPool; - if (conf == null || isBlank(conf.getServiceUrl()) || eventLoopGroup == null) { + this.createdEventLoopGroup = eventLoopGroup == null; + this.createdCnxPool = connectionPool == null; + if ((externalExecutorProvider == null) != (internalExecutorProvider == null)) { + throw new IllegalArgumentException( + "Both externalExecutorProvider and internalExecutorProvider must be specified or unspecified."); + } + this.createdExecutorProviders = externalExecutorProvider == null; + eventLoopGroupReference = eventLoopGroup != null ? eventLoopGroup : getEventLoopGroup(conf); + this.eventLoopGroup = eventLoopGroupReference; + if (conf == null || isBlank(conf.getServiceUrl()) || this.eventLoopGroup == null) { throw new PulsarClientException.InvalidConfigurationException("Invalid client configuration"); } - this.eventLoopGroup = eventLoopGroup; setAuth(conf); this.conf = conf; clientClock = conf.getClock(); conf.getAuthentication().start(); - this.cnxPool = cnxPool; - externalExecutorProvider = new ExecutorProvider(conf.getNumListenerThreads(), "pulsar-external-listener"); - internalExecutorService = new ExecutorProvider(conf.getNumIoThreads(), "pulsar-client-internal"); + connectionPoolReference = + connectionPool != null ? connectionPool : new ConnectionPool(conf, this.eventLoopGroup); + this.cnxPool = connectionPoolReference; + this.externalExecutorProvider = externalExecutorProvider != null ? externalExecutorProvider : + new ExecutorProvider(conf.getNumListenerThreads(), "pulsar-external-listener"); + this.internalExecutorProvider = internalExecutorProvider != null ? 
internalExecutorProvider : + new ExecutorProvider(conf.getNumIoThreads(), "pulsar-client-internal"); + this.scheduledExecutorProvider = new ScheduledExecutorProvider(conf.getNumIoThreads(), + "pulsar-client-scheduled"); if (conf.getServiceUrl().startsWith("http")) { - lookup = new HttpLookupService(conf, eventLoopGroup); + lookup = new HttpLookupService(conf, this.eventLoopGroup); } else { - lookup = new BinaryProtoLookupService(this, conf.getServiceUrl(), conf.getListenerName(), conf.isUseTls(), externalExecutorProvider.getExecutor()); + lookup = new BinaryProtoLookupService(this, conf.getServiceUrl(), conf.getListenerName(), + conf.isUseTls(), this.externalExecutorProvider.getExecutor()); } if (timer == null) { this.timer = new HashedWheelTimer(getThreadFactory("pulsar-timer"), 1, TimeUnit.MILLISECONDS); @@ -198,8 +216,8 @@ private PulsarClientImpl(ClientConfigurationData conf, EventLoopGroup eventLoopG state.set(State.Open); } catch (Throwable t) { shutdown(); - shutdownEventLoopGroup(eventLoopGroup); - closeCnxPool(cnxPool); + shutdownEventLoopGroup(eventLoopGroupReference); + closeCnxPool(connectionPoolReference); throw t; } } @@ -277,11 +295,13 @@ public CompletableFuture> createProducerAsync(ProducerConfigurat if (schema instanceof AutoConsumeSchema) { return FutureUtil.failedFuture( - new PulsarClientException.InvalidConfigurationException("AutoConsumeSchema is only used by consumers to detect schemas automatically")); + new PulsarClientException.InvalidConfigurationException( + "AutoConsumeSchema is only used by consumers to detect schemas automatically")); } if (state.get() != State.Open) { - return FutureUtil.failedFuture(new PulsarClientException.AlreadyClosedException("Client already closed : state = " + state.get())); + return FutureUtil.failedFuture( + new PulsarClientException.AlreadyClosedException("Client already closed : state = " + state.get())); } String topic = conf.getTopicName(); @@ -358,7 +378,8 @@ protected PartitionedProducerImpl 
newPartitionedProducerImpl(String topic ProducerConfigurationData conf, Schema schema, ProducerInterceptors interceptors, - CompletableFuture> producerCreatedFuture, + CompletableFuture> + producerCreatedFuture, PartitionedTopicMetadata metadata) { return new PartitionedProducerImpl<>(PulsarClientImpl.this, topic, conf, metadata.partitions, producerCreatedFuture, schema, interceptors); @@ -392,7 +413,8 @@ public CompletableFuture> subscribeAsync(ConsumerConfigurationD return subscribeAsync(conf, Schema.BYTES, null); } - public CompletableFuture> subscribeAsync(ConsumerConfigurationData conf, Schema schema, ConsumerInterceptors interceptors) { + public CompletableFuture> subscribeAsync(ConsumerConfigurationData conf, Schema schema, + ConsumerInterceptors interceptors) { if (state.get() != State.Open) { return FutureUtil.failedFuture(new PulsarClientException.AlreadyClosedException("Client already closed")); } @@ -404,7 +426,8 @@ public CompletableFuture> subscribeAsync(ConsumerConfigurationDa for (String topic : conf.getTopicNames()) { if (!TopicName.isValid(topic)) { - return FutureUtil.failedFuture(new PulsarClientException.InvalidTopicNameException("Invalid topic name: '" + topic + "'")); + return FutureUtil.failedFuture( + new PulsarClientException.InvalidTopicNameException("Invalid topic name: '" + topic + "'")); } } @@ -440,12 +463,16 @@ public CompletableFuture> subscribeAsync(ConsumerConfigurationDa } } - private CompletableFuture> singleTopicSubscribeAsync(ConsumerConfigurationData conf, Schema schema, ConsumerInterceptors interceptors) { + private CompletableFuture> singleTopicSubscribeAsync(ConsumerConfigurationData conf, + Schema schema, + ConsumerInterceptors interceptors) { return preProcessSchemaBeforeSubscribe(this, schema, conf.getSingleTopic()) - .thenCompose(schemaClone -> doSingleTopicSubscribeAsync(conf, schemaClone, interceptors)); + .thenCompose(schemaClone -> doSingleTopicSubscribeAsync(conf, schemaClone, interceptors)); } - private 
CompletableFuture> doSingleTopicSubscribeAsync(ConsumerConfigurationData conf, Schema schema, ConsumerInterceptors interceptors) { + private CompletableFuture> doSingleTopicSubscribeAsync(ConsumerConfigurationData conf, + Schema schema, + ConsumerInterceptors interceptors) { CompletableFuture> consumerSubscribedFuture = new CompletableFuture<>(); String topic = conf.getSingleTopic(); @@ -475,7 +502,9 @@ private CompletableFuture> doSingleTopicSubscribeAsync(ConsumerC return consumerSubscribedFuture; } - private CompletableFuture> multiTopicSubscribeAsync(ConsumerConfigurationData conf, Schema schema, ConsumerInterceptors interceptors) { + private CompletableFuture> multiTopicSubscribeAsync(ConsumerConfigurationData conf, + Schema schema, + ConsumerInterceptors interceptors) { CompletableFuture> consumerSubscribedFuture = new CompletableFuture<>(); ConsumerBase consumer = new MultiTopicsConsumerImpl<>(PulsarClientImpl.this, conf, @@ -502,9 +531,9 @@ private CompletableFuture> patternTopicSubscribeAsync(ConsumerCo lookup.getTopicsUnderNamespace(namespaceName, subscriptionMode) .thenAccept(topics -> { if (log.isDebugEnabled()) { - log.debug("Get topics under namespace {}, topics.size: {}", namespaceName.toString(), topics.size()); + log.debug("Get topics under namespace {}, topics.size: {}", namespaceName, topics.size()); topics.forEach(topicName -> - log.debug("Get topics under namespace {}, topic: {}", namespaceName.toString(), topicName)); + log.debug("Get topics under namespace {}, topic: {}", namespaceName, topicName)); } List topicsList = topicsPatternFilter(topics, conf.getTopicsPattern()); @@ -598,7 +627,8 @@ protected CompletableFuture> createSingleTopicReaderAsync( if (log.isDebugEnabled()) { log.debug("[{}] Received topic metadata. 
partitions: {}", topic, metadata.partitions); } - if (metadata.partitions > 0 && MultiTopicsConsumerImpl.isIllegalMultiTopicsMessageId(conf.getStartMessageId())) { + if (metadata.partitions > 0 && + MultiTopicsConsumerImpl.isIllegalMultiTopicsMessageId(conf.getStartMessageId())) { readerFuture.completeExceptionally( new PulsarClientException("The partitioned topic startMessageId is illegal")); return; @@ -611,7 +641,8 @@ protected CompletableFuture> createSingleTopicReaderAsync( conf, externalExecutorProvider, consumerSubscribedFuture, schema); consumer = ((MultiTopicsReaderImpl) reader).getMultiTopicsConsumer(); } else { - reader = new ReaderImpl<>(PulsarClientImpl.this, conf, externalExecutorProvider, consumerSubscribedFuture, schema); + reader = new ReaderImpl<>(PulsarClientImpl.this, conf, externalExecutorProvider, + consumerSubscribedFuture, schema); consumer = ((ReaderImpl) reader).getConsumer(); } @@ -642,7 +673,8 @@ public CompletableFuture> getSchema(String topic) { topicName = TopicName.get(topic); } catch (Throwable t) { return FutureUtil - .failedFuture(new PulsarClientException.InvalidTopicNameException("Invalid topic name: '" + topic + "'")); + .failedFuture( + new PulsarClientException.InvalidTopicNameException("Invalid topic name: '" + topic + "'")); } return lookup.getSchema(topicName); @@ -712,6 +744,14 @@ public void shutdown() throws PulsarClientException { throwable = t; } } + if (tcClient != null) { + try { + tcClient.close(); + } catch (Throwable t) { + log.warn("Failed to close tcClient"); + throwable = t; + } + } try { // Shutting down eventLoopGroup separately because in some cases, cnxPool might be using different // eventLoopGroup. 
@@ -747,14 +787,6 @@ public void shutdown() throws PulsarClientException { throwable = t; } } - if (tcClient != null) { - try { - tcClient.close(); - } catch (Throwable t) { - log.warn("Failed to close tcClient"); - throwable = t; - } - } if (throwable != null) { throw throwable; } @@ -785,27 +817,29 @@ private void shutdownEventLoopGroup(EventLoopGroup eventLoopGroup) throws Pulsar } private void shutdownExecutors() throws PulsarClientException { - PulsarClientException pulsarClientException = null; + if (createdExecutorProviders) { + PulsarClientException pulsarClientException = null; - if (externalExecutorProvider != null && !externalExecutorProvider.isShutdown()) { - try { - externalExecutorProvider.shutdownNow(); - } catch (Throwable t) { - log.warn("Failed to shutdown externalExecutorProvider", t); - pulsarClientException = PulsarClientException.unwrap(t); + if (externalExecutorProvider != null && !externalExecutorProvider.isShutdown()) { + try { + externalExecutorProvider.shutdownNow(); + } catch (Throwable t) { + log.warn("Failed to shutdown externalExecutorProvider", t); + pulsarClientException = PulsarClientException.unwrap(t); + } } - } - if (internalExecutorService != null && !internalExecutorService.isShutdown()) { - try { - internalExecutorService.shutdownNow(); - } catch (Throwable t) { - log.warn("Failed to shutdown internalExecutorService", t); - pulsarClientException = PulsarClientException.unwrap(t); + if (internalExecutorProvider != null && !internalExecutorProvider.isShutdown()) { + try { + internalExecutorProvider.shutdownNow(); + } catch (Throwable t) { + log.warn("Failed to shutdown internalExecutorService", t); + pulsarClientException = PulsarClientException.unwrap(t); + } } - } - if (pulsarClientException != null) { - throw pulsarClientException; + if (pulsarClientException != null) { + throw pulsarClientException; + } } } @@ -827,7 +861,12 @@ public synchronized void updateServiceUrl(String serviceUrl) throws PulsarClient public 
CompletableFuture getConnection(final String topic) { TopicName topicName = TopicName.get(topic); return lookup.getBroker(topicName) - .thenCompose(pair -> cnxPool.getConnection(pair.getLeft(), pair.getRight())); + .thenCompose(pair -> getConnection(pair.getLeft(), pair.getRight())); + } + + public CompletableFuture getConnection(final InetSocketAddress logicalAddress, + final InetSocketAddress physicalAddress) { + return cnxPool.getConnection(logicalAddress, physicalAddress); } /** visible for pulsar-functions **/ @@ -884,9 +923,9 @@ public CompletableFuture getPartitionedTopicMetadata(S TopicName topicName = TopicName.get(topic); AtomicLong opTimeoutMs = new AtomicLong(conf.getLookupTimeoutMs()); Backoff backoff = new BackoffBuilder() - .setInitialTime(100, TimeUnit.MILLISECONDS) + .setInitialTime(conf.getInitialBackoffIntervalNanos(), TimeUnit.NANOSECONDS) .setMandatoryStop(opTimeoutMs.get() * 2, TimeUnit.MILLISECONDS) - .setMax(1, TimeUnit.MINUTES) + .setMax(conf.getMaxBackoffIntervalNanos(), TimeUnit.NANOSECONDS) .create(); getPartitionedTopicMetadata(topicName, backoff, opTimeoutMs, metadataFuture, new ArrayList<>()); @@ -915,9 +954,9 @@ private void getPartitionedTopicMetadata(TopicName topicName, } previousExceptions.add(e); - ((ScheduledExecutorService) externalExecutorProvider.getExecutor()).schedule(() -> { - log.warn("[topic: {}] Could not get connection while getPartitionedTopicMetadata -- Will try again in {} ms", - topicName, nextDelay); + ((ScheduledExecutorService) scheduledExecutorProvider.getExecutor()).schedule(() -> { + log.warn("[topic: {}] Could not get connection while getPartitionedTopicMetadata -- " + + "Will try again in {} ms", topicName, nextDelay); remainingTime.addAndGet(-nextDelay); getPartitionedTopicMetadata(topicName, backoff, remainingTime, future, previousExceptions); }, nextDelay, TimeUnit.MILLISECONDS); @@ -1035,8 +1074,13 @@ protected CompletableFuture> preProcessSchemaBeforeSubscribe(Pulsa } public ExecutorService 
getInternalExecutorService() { - return internalExecutorService.getExecutor(); + return internalExecutorProvider.getExecutor(); + } + + public ScheduledExecutorProvider getScheduledExecutorProvider() { + return scheduledExecutorProvider; } + // // Transaction related API // @@ -1050,5 +1094,4 @@ public TransactionBuilder newTransaction() throws PulsarClientException { } return new TransactionBuilderImpl(this, tcClient); } - } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImplementationBindingImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImplementationBindingImpl.java index c146f238d55fc..2747d39a7357e 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImplementationBindingImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PulsarClientImplementationBindingImpl.java @@ -45,35 +45,7 @@ import org.apache.pulsar.client.api.schema.SchemaDefinitionBuilder; import org.apache.pulsar.client.impl.auth.AuthenticationTls; import org.apache.pulsar.client.impl.auth.AuthenticationToken; -import org.apache.pulsar.client.impl.schema.AutoConsumeSchema; -import org.apache.pulsar.client.impl.schema.AutoProduceBytesSchema; -import org.apache.pulsar.client.impl.schema.AvroSchema; -import org.apache.pulsar.client.impl.schema.BooleanSchema; -import org.apache.pulsar.client.impl.schema.ByteBufferSchema; -import org.apache.pulsar.client.impl.schema.ByteSchema; -import org.apache.pulsar.client.impl.schema.BytesSchema; -import org.apache.pulsar.client.impl.schema.DateSchema; -import org.apache.pulsar.client.impl.schema.DoubleSchema; -import org.apache.pulsar.client.impl.schema.FloatSchema; -import org.apache.pulsar.client.impl.schema.InstantSchema; -import org.apache.pulsar.client.impl.schema.IntSchema; -import org.apache.pulsar.client.impl.schema.JSONSchema; -import org.apache.pulsar.client.impl.schema.KeyValueSchemaImpl; -import 
org.apache.pulsar.client.impl.schema.KeyValueSchemaInfo; -import org.apache.pulsar.client.impl.schema.LocalDateSchema; -import org.apache.pulsar.client.impl.schema.LocalDateTimeSchema; -import org.apache.pulsar.client.impl.schema.LocalTimeSchema; -import org.apache.pulsar.client.impl.schema.LongSchema; -import org.apache.pulsar.client.impl.schema.NativeAvroBytesSchema; -import org.apache.pulsar.client.impl.schema.ProtobufNativeSchema; -import org.apache.pulsar.client.impl.schema.ProtobufSchema; -import org.apache.pulsar.client.impl.schema.RecordSchemaBuilderImpl; -import org.apache.pulsar.client.impl.schema.SchemaDefinitionBuilderImpl; -import org.apache.pulsar.client.impl.schema.SchemaUtils; -import org.apache.pulsar.client.impl.schema.ShortSchema; -import org.apache.pulsar.client.impl.schema.StringSchema; -import org.apache.pulsar.client.impl.schema.TimeSchema; -import org.apache.pulsar.client.impl.schema.TimestampSchema; +import org.apache.pulsar.client.impl.schema.*; import org.apache.pulsar.client.impl.schema.generic.GenericProtobufNativeSchema; import org.apache.pulsar.client.impl.schema.generic.GenericSchemaImpl; import org.apache.pulsar.client.internal.PulsarClientImplementationBinding; @@ -383,4 +355,9 @@ public BatcherBuilder newKeyBasedBatcherBuilder() { public MessagePayloadFactory newDefaultMessagePayloadFactory() { return new MessagePayloadFactoryImpl(); } + + public SchemaInfo newSchemaInfoImpl(String name, byte[] schema, SchemaType type, long timestamp, + Map propertiesValue) { + return new SchemaInfoImpl(name, schema, type, timestamp, propertiesValue); + } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ReaderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ReaderImpl.java index 37d346ef93209..72c74e8875dde 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ReaderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ReaderImpl.java @@ -116,7 +116,7 @@ public void 
reachedEndOfTopic(Consumer consumer) { final int partitionIdx = TopicName.getPartitionIndex(readerConfiguration.getTopicName()); consumer = new ConsumerImpl<>(client, readerConfiguration.getTopicName(), consumerConfiguration, - executorProvider, partitionIdx, false, consumerFuture, + executorProvider, partitionIdx, false, false, consumerFuture, readerConfiguration.getStartMessageId(), readerConfiguration.getStartMessageFromRollbackDurationInSec(), schema, null, true /* createTopicIfDoesNotExist */); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java index f96cf57867a70..73df89dd6ae6a 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java @@ -18,11 +18,14 @@ */ package org.apache.pulsar.client.impl; +import com.google.common.annotations.VisibleForTesting; import io.netty.buffer.ByteBuf; import io.netty.util.Recycler; import io.netty.util.ReferenceCountUtil; import io.netty.util.Timeout; +import io.netty.util.Timer; import io.netty.util.TimerTask; +import java.util.concurrent.ExecutorService; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClientException; import org.apache.pulsar.client.api.transaction.TxnID; @@ -57,9 +60,15 @@ public class TransactionMetaStoreHandler extends HandlerState implements Connect private final long transactionCoordinatorId; private final ConnectionHandler connectionHandler; private final ConcurrentLongHashMap> pendingRequests = - new ConcurrentLongHashMap<>(16, 1); + ConcurrentLongHashMap.>newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); private final ConcurrentLinkedQueue timeoutQueue; + protected final Timer timer; + private final ExecutorService 
internalPinnedExecutor; + private static class RequestTime { final long creationTimeMs; final long requestId; @@ -94,6 +103,11 @@ public TransactionMetaStoreHandler(long transactionCoordinatorId, PulsarClientIm .create(), this); this.connectFuture = connectFuture; + this.internalPinnedExecutor = pulsarClient.getInternalExecutorService(); + this.timer = pulsarClient.timer(); + } + + public void start() { this.connectionHandler.grabCnx(); } @@ -108,65 +122,74 @@ public void connectionFailed(PulsarClientException exception) { @Override public void connectionOpened(ClientCnx cnx) { - LOG.info("Transaction meta handler with transaction coordinator id {} connection opened.", - transactionCoordinatorId); - - if (getState() == State.Closing || getState() == State.Closed) { - setState(State.Closed); - failPendingRequest(); - this.pendingRequests.clear(); - return; - } - - connectionHandler.setClientCnx(cnx); - cnx.registerTransactionMetaStoreHandler(transactionCoordinatorId, this); - - // if broker protocol version < 19, don't send TcClientConnectRequest to broker. - if (cnx.getRemoteEndpointProtocolVersion() > ProtocolVersion.v18.getValue()) { - long requestId = client.newRequestId(); - ByteBuf request = Commands.newTcClientConnectRequest(transactionCoordinatorId, requestId); + internalPinnedExecutor.execute(() -> { + LOG.info("Transaction meta handler with transaction coordinator id {} connection opened.", + transactionCoordinatorId); + + if (getState() == State.Closing || getState() == State.Closed) { + setState(State.Closed); + failPendingRequest(); + return; + } - cnx.sendRequestWithId(request, requestId).thenRun(() -> { - LOG.info("Transaction coordinator client connect success! tcId : {}", transactionCoordinatorId); + // if broker protocol version < 19, don't send TcClientConnectRequest to broker. 
+ if (cnx.getRemoteEndpointProtocolVersion() > ProtocolVersion.v18.getValue()) { + long requestId = client.newRequestId(); + ByteBuf request = Commands.newTcClientConnectRequest(transactionCoordinatorId, requestId); + + cnx.sendRequestWithId(request, requestId).thenRun(() -> { + internalPinnedExecutor.execute(() -> { + LOG.info("Transaction coordinator client connect success! tcId : {}", transactionCoordinatorId); + if (!changeToReadyState()) { + setState(State.Closed); + cnx.channel().close(); + } + + connectionHandler.setClientCnx(cnx); + cnx.registerTransactionMetaStoreHandler(transactionCoordinatorId, this); + if (!this.connectFuture.isDone()) { + this.connectFuture.complete(null); + } + this.connectionHandler.resetBackoff(); + pendingRequests.forEach((requestID, opBase) -> checkStateAndSendRequest(opBase)); + }); + }).exceptionally((e) -> { + internalPinnedExecutor.execute(() -> { + LOG.error("Transaction coordinator client connect fail! tcId : {}", + transactionCoordinatorId, e.getCause()); + if (getState() == State.Closing || getState() == State.Closed + || e.getCause() instanceof PulsarClientException.NotAllowedException) { + setState(State.Closed); + cnx.channel().close(); + } else { + connectionHandler.reconnectLater(e.getCause()); + } + }); + return null; + }); + } else { if (!changeToReadyState()) { - setState(State.Closed); - cnx.channel().close(); - } - - if (!this.connectFuture.isDone()) { - this.connectFuture.complete(null); - } - this.connectionHandler.resetBackoff(); - }).exceptionally((e) -> { - LOG.error("Transaction coordinator client connect fail! 
tcId : {}", - transactionCoordinatorId, e.getCause()); - if (getState() == State.Closing || getState() == State.Closed - || e.getCause() instanceof PulsarClientException.NotAllowedException) { - setState(State.Closed); cnx.channel().close(); } else { - connectionHandler.reconnectLater(e.getCause()); + connectionHandler.setClientCnx(cnx); + cnx.registerTransactionMetaStoreHandler(transactionCoordinatorId, this); } - return null; - }); - } else { - if (!changeToReadyState()) { - cnx.channel().close(); + this.connectFuture.complete(null); } - this.connectFuture.complete(null); - } + }); } private void failPendingRequest() { - pendingRequests.keys().forEach(k -> { - OpBase op = pendingRequests.remove(k); + // this method is executed in internalPinnedExecutor. + pendingRequests.forEach((k, op) -> { if (op != null && !op.callback.isDone()) { op.callback.completeExceptionally(new PulsarClientException.AlreadyClosedException( - "Could not get response from transaction meta store when " + - "the transaction meta store has already close.")); + "Could not get response from transaction meta store when " + + "the transaction meta store has already close.")); onResponse(op); } }); + this.pendingRequests.clear(); } public CompletableFuture newTransactionAsync(long timeout, TimeUnit unit) { @@ -174,42 +197,80 @@ public CompletableFuture newTransactionAsync(long timeout, TimeUnit unit) LOG.debug("New transaction with timeout in ms {}", unit.toMillis(timeout)); } CompletableFuture callback = new CompletableFuture<>(); - if (!canSendRequest(callback)) { return callback; } long requestId = client.newRequestId(); ByteBuf cmd = Commands.newTxn(transactionCoordinatorId, requestId, unit.toMillis(timeout)); - OpForTxnIdCallBack op = OpForTxnIdCallBack.create(cmd, callback); - pendingRequests.put(requestId, op); - timeoutQueue.add(new RequestTime(System.currentTimeMillis(), requestId)); - cmd.retain(); - cnx().ctx().writeAndFlush(cmd, cnx().ctx().voidPromise()); + OpForTxnIdCallBack op = 
OpForTxnIdCallBack.create(cmd, callback, client); + internalPinnedExecutor.execute(() -> { + pendingRequests.put(requestId, op); + timeoutQueue.add(new RequestTime(System.currentTimeMillis(), requestId)); + if (!checkStateAndSendRequest(op)) { + pendingRequests.remove(requestId); + } + }); return callback; } void handleNewTxnResponse(CommandNewTxnResponse response) { - OpForTxnIdCallBack op = (OpForTxnIdCallBack) pendingRequests.remove(response.getRequestId()); - if (op == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Got new txn response for timeout {} - {}", response.getTxnidMostBits(), - response.getTxnidLeastBits()); + boolean hasError = response.hasError(); + ServerError error; + String message; + if (hasError) { + error = response.getError(); + message = response.getMessage(); + } else { + error = null; + message = null; + } + TxnID txnID = new TxnID(response.getTxnidMostBits(), response.getTxnidLeastBits()); + long requestId = response.getRequestId(); + internalPinnedExecutor.execute(() -> { + OpForTxnIdCallBack op = (OpForTxnIdCallBack) pendingRequests.remove(requestId); + if (op == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Got new txn response for timeout {} - {}", txnID.getMostSigBits(), + txnID.getLeastSigBits()); + } + return; } - return; - } - if (!response.hasError()) { - TxnID txnID = new TxnID(response.getTxnidMostBits(), response.getTxnidLeastBits()); - if (LOG.isDebugEnabled()) { - LOG.debug("Got new txn response {} for request {}", txnID, response.getRequestId()); + if (!hasError) { + if (LOG.isDebugEnabled()) { + LOG.debug("Got new txn response {} for request {}", txnID, requestId); + } + op.callback.complete(txnID); + } else { + if (checkIfNeedRetryByError(error, message, op)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Get a response for the {} request {} error " + + "TransactionCoordinatorNotFound and try it again", + BaseCommand.Type.NEW_TXN.name(), requestId); + } + pendingRequests.put(requestId, op); + 
timer.newTimeout(timeout -> { + internalPinnedExecutor.execute(() -> { + if (!pendingRequests.containsKey(requestId)) { + if (LOG.isDebugEnabled()) { + LOG.debug("The request {} already timeout", requestId); + } + return; + } + if (!checkStateAndSendRequest(op)) { + pendingRequests.remove(requestId); + } + }); + } + , op.backoff.next(), TimeUnit.MILLISECONDS); + return; + } + LOG.error("Got {} for request {} error {}", BaseCommand.Type.NEW_TXN.name(), + requestId, error); } - op.callback.complete(txnID); - } else { - LOG.error("Got new txn for request {} error {}", response.getRequestId(), response.getError()); - handleTransactionFailOp(response.getError(), response.getMessage(), op); - } - onResponse(op); + onResponse(op); + }); } public CompletableFuture addPublishPartitionToTxnAsync(TxnID txnID, List partitions) { @@ -217,42 +278,84 @@ public CompletableFuture addPublishPartitionToTxnAsync(TxnID txnID, List callback = new CompletableFuture<>(); - if (!canSendRequest(callback)) { return callback; } long requestId = client.newRequestId(); ByteBuf cmd = Commands.newAddPartitionToTxn( requestId, txnID.getLeastSigBits(), txnID.getMostSigBits(), partitions); - OpForVoidCallBack op = OpForVoidCallBack.create(cmd, callback); - pendingRequests.put(requestId, op); - timeoutQueue.add(new RequestTime(System.currentTimeMillis(), requestId)); - cmd.retain(); - cnx().ctx().writeAndFlush(cmd, cnx().ctx().voidPromise()); + OpForVoidCallBack op = OpForVoidCallBack + .create(cmd, callback, client); + internalPinnedExecutor.execute(() -> { + pendingRequests.put(requestId, op); + timeoutQueue.add(new RequestTime(System.currentTimeMillis(), requestId)); + if (!checkStateAndSendRequest(op)) { + pendingRequests.remove(requestId); + } + }); + return callback; } void handleAddPublishPartitionToTxnResponse(CommandAddPartitionToTxnResponse response) { - OpForVoidCallBack op = (OpForVoidCallBack) pendingRequests.remove(response.getRequestId()); - if (op == null) { - if 
(LOG.isDebugEnabled()) { - LOG.debug("Got add publish partition to txn response for timeout {} - {}", response.getTxnidMostBits(), - response.getTxnidLeastBits()); + boolean hasError = response.hasError(); + ServerError error; + String message; + if (hasError) { + error = response.getError(); + message = response.getMessage(); + } else { + error = null; + message = null; + } + TxnID txnID = new TxnID(response.getTxnidMostBits(), response.getTxnidLeastBits()); + long requestId = response.getRequestId(); + internalPinnedExecutor.execute(() -> { + OpForVoidCallBack op = (OpForVoidCallBack) pendingRequests.remove(requestId); + if (op == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Got add publish partition to txn response for timeout {} - {}", txnID.getMostSigBits(), + txnID.getLeastSigBits()); + } + return; } - return; - } - if (!response.hasError()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Add publish partition for request {} success.", response.getRequestId()); + if (!hasError) { + if (LOG.isDebugEnabled()) { + LOG.debug("Add publish partition for request {} success.", requestId); + } + op.callback.complete(null); + } else { + if (checkIfNeedRetryByError(error, message, op)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Get a response for the {} request {} " + + " error TransactionCoordinatorNotFound and try it again", + BaseCommand.Type.ADD_PARTITION_TO_TXN.name(), requestId); + } + pendingRequests.put(requestId, op); + timer.newTimeout(timeout -> { + internalPinnedExecutor.execute(() -> { + if (!pendingRequests.containsKey(requestId)) { + if (LOG.isDebugEnabled()) { + LOG.debug("The request {} already timeout", requestId); + } + return; + } + if (!checkStateAndSendRequest(op)) { + pendingRequests.remove(requestId); + } + }); + } + , op.backoff.next(), TimeUnit.MILLISECONDS); + return; + } + LOG.error("{} for request {} error {} with txnID {}.", BaseCommand.Type.ADD_PARTITION_TO_TXN.name(), + requestId, error, txnID); + } - op.callback.complete(null); - } 
else { - LOG.error("Add publish partition for request {} error {}.", response.getRequestId(), response.getError()); - handleTransactionFailOp(response.getError(), response.getMessage(), op); - } - onResponse(op); + onResponse(op); + }); } public CompletableFuture addSubscriptionToTxn(TxnID txnID, List subscriptionList) { @@ -261,41 +364,80 @@ public CompletableFuture addSubscriptionToTxn(TxnID txnID, List callback = new CompletableFuture<>(); - if (!canSendRequest(callback)) { return callback; } long requestId = client.newRequestId(); ByteBuf cmd = Commands.newAddSubscriptionToTxn( requestId, txnID.getLeastSigBits(), txnID.getMostSigBits(), subscriptionList); - OpForVoidCallBack op = OpForVoidCallBack.create(cmd, callback); - pendingRequests.put(requestId, op); - timeoutQueue.add(new RequestTime(System.currentTimeMillis(), requestId)); - cmd.retain(); - cnx().ctx().writeAndFlush(cmd, cnx().ctx().voidPromise()); + OpForVoidCallBack op = OpForVoidCallBack.create(cmd, callback, client); + internalPinnedExecutor.execute(() -> { + pendingRequests.put(requestId, op); + timeoutQueue.add(new RequestTime(System.currentTimeMillis(), requestId)); + if (!checkStateAndSendRequest(op)) { + pendingRequests.remove(requestId); + } + }); return callback; } public void handleAddSubscriptionToTxnResponse(CommandAddSubscriptionToTxnResponse response) { - OpForVoidCallBack op = (OpForVoidCallBack) pendingRequests.remove(response.getRequestId()); - if (op == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Add subscription to txn timeout for request {}.", response.getRequestId()); + boolean hasError = response.hasError(); + ServerError error; + String message; + if (hasError) { + error = response.getError(); + message = response.getMessage(); + } else { + error = null; + message = null; + } + long requestId = response.getRequestId(); + internalPinnedExecutor.execute(() -> { + OpForVoidCallBack op = (OpForVoidCallBack) pendingRequests.remove(requestId); + if (op == null) { + if 
(LOG.isDebugEnabled()) { + LOG.debug("Add subscription to txn timeout for request {}.", requestId); + } + return; } - return; - } - if (!response.hasError()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Add subscription to txn success for request {}.", response.getRequestId()); + if (!hasError) { + if (LOG.isDebugEnabled()) { + LOG.debug("Add subscription to txn success for request {}.", requestId); + } + op.callback.complete(null); + } else { + LOG.error("Add subscription to txn failed for request {} error {}.", + requestId, error); + if (checkIfNeedRetryByError(error, message, op)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Get a response for {} request {} error TransactionCoordinatorNotFound and try it again", + BaseCommand.Type.ADD_SUBSCRIPTION_TO_TXN.name(), requestId); + } + pendingRequests.put(requestId, op); + timer.newTimeout(timeout -> { + internalPinnedExecutor.execute(() -> { + if (!pendingRequests.containsKey(requestId)) { + if (LOG.isDebugEnabled()) { + LOG.debug("The request {} already timeout", requestId); + } + return; + } + if (!checkStateAndSendRequest(op)) { + pendingRequests.remove(requestId); + } + }); + } + , op.backoff.next(), TimeUnit.MILLISECONDS); + return; + } + LOG.error("{} failed for request {} error {}.", BaseCommand.Type.ADD_SUBSCRIPTION_TO_TXN.name(), + requestId, error); + } - op.callback.complete(null); - } else { - LOG.error("Add subscription to txn failed for request {} error {}.", - response.getRequestId(), response.getError()); - handleTransactionFailOp(response.getError(), response.getMessage(), op); - } - onResponse(op); + onResponse(op); + }); } public CompletableFuture endTxnAsync(TxnID txnID, TxnAction action) { @@ -303,68 +445,119 @@ public CompletableFuture endTxnAsync(TxnID txnID, TxnAction action) { LOG.debug("End txn {}, action {}", txnID, action); } CompletableFuture callback = new CompletableFuture<>(); - if (!canSendRequest(callback)) { return callback; } long requestId = client.newRequestId(); BaseCommand cmd 
= Commands.newEndTxn(requestId, txnID.getLeastSigBits(), txnID.getMostSigBits(), action); ByteBuf buf = Commands.serializeWithSize(cmd); - OpForVoidCallBack op = OpForVoidCallBack.create(buf, callback); - pendingRequests.put(requestId, op); - timeoutQueue.add(new RequestTime(System.currentTimeMillis(), requestId)); - buf.retain(); - cnx().ctx().writeAndFlush(buf, cnx().ctx().voidPromise()); + OpForVoidCallBack op = OpForVoidCallBack.create(buf, callback, client); + internalPinnedExecutor.execute(() -> { + pendingRequests.put(requestId, op); + timeoutQueue.add(new RequestTime(System.currentTimeMillis(), requestId)); + if (!checkStateAndSendRequest(op)) { + pendingRequests.remove(requestId); + } + }); return callback; } void handleEndTxnResponse(CommandEndTxnResponse response) { - OpForVoidCallBack op = (OpForVoidCallBack) pendingRequests.remove(response.getRequestId()); - if (op == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Got end txn response for timeout {} - {}", response.getTxnidMostBits(), - response.getTxnidLeastBits()); + boolean hasError = response.hasError(); + ServerError error; + String message; + if (hasError) { + error = response.getError(); + message = response.getMessage(); + } else { + error = null; + message = null; + } + TxnID txnID = new TxnID(response.getTxnidMostBits(), response.getTxnidLeastBits()); + long requestId = response.getRequestId(); + internalPinnedExecutor.execute(() -> { + OpForVoidCallBack op = (OpForVoidCallBack) pendingRequests.remove(requestId); + if (op == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Got end txn response for timeout {} - {}", txnID.getMostSigBits(), + txnID.getLeastSigBits()); + } + return; } - return; - } - if (!response.hasError()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Got end txn response success for request {}", response.getRequestId()); - } - op.callback.complete(null); - } else { - LOG.error("Got end txn response for request {} error {}", response.getRequestId(), response.getError()); 
- handleTransactionFailOp(response.getError(), response.getMessage(), op); - } + if (!hasError) { + if (LOG.isDebugEnabled()) { + LOG.debug("Got end txn response success for request {}", requestId); + } + op.callback.complete(null); + } else { + if (checkIfNeedRetryByError(error, message, op)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Get a response for the {} request {} error " + + "TransactionCoordinatorNotFound and try it again", + BaseCommand.Type.END_TXN.name(), requestId); + } + pendingRequests.put(requestId, op); + timer.newTimeout(timeout -> { + internalPinnedExecutor.execute(() -> { + if (!pendingRequests.containsKey(requestId)) { + if (LOG.isDebugEnabled()) { + LOG.debug("The request {} already timeout", requestId); + } + return; + } + if (!checkStateAndSendRequest(op)) { + pendingRequests.remove(requestId); + } + }); + } + , op.backoff.next(), TimeUnit.MILLISECONDS); + return; + } + LOG.error("Got {} response for request {} error {}", BaseCommand.Type.END_TXN.name(), + requestId, error); - onResponse(op); + } + onResponse(op); + }); } - private void handleTransactionFailOp(ServerError error, String message, OpBase op) { - if (error == ServerError.TransactionCoordinatorNotFound && getState() != State.Connecting) { - connectionHandler.reconnectLater(new TransactionCoordinatorClientException - .CoordinatorNotFoundException(message)); + + private boolean checkIfNeedRetryByError(ServerError error, String message, OpBase op) { + if (error == ServerError.TransactionCoordinatorNotFound) { + if (getState() != State.Connecting) { + connectionHandler.reconnectLater(new TransactionCoordinatorClientException + .CoordinatorNotFoundException(message)); + } + return true; } if (op != null) { op.callback.completeExceptionally(getExceptionByServerError(error, message)); } + return false; } private static abstract class OpBase { protected ByteBuf cmd; protected CompletableFuture callback; + protected Backoff backoff; abstract void recycle(); } private static class 
OpForTxnIdCallBack extends OpBase { - static OpForTxnIdCallBack create(ByteBuf cmd, CompletableFuture callback) { + static OpForTxnIdCallBack create(ByteBuf cmd, CompletableFuture callback, PulsarClientImpl client) { OpForTxnIdCallBack op = RECYCLER.get(); op.callback = callback; op.cmd = cmd; + op.backoff = new BackoffBuilder() + .setInitialTime(client.getConfiguration().getInitialBackoffIntervalNanos(), + TimeUnit.NANOSECONDS) + .setMax(client.getConfiguration().getMaxBackoffIntervalNanos() / 10, TimeUnit.NANOSECONDS) + .setMandatoryStop(0, TimeUnit.MILLISECONDS) + .create(); return op; } @@ -374,6 +567,9 @@ private OpForTxnIdCallBack(Recycler.Handle recyclerHandle) { @Override void recycle() { + this.backoff = null; + this.cmd = null; + this.callback = null; recyclerHandle.recycle(this); } @@ -388,18 +584,29 @@ protected OpForTxnIdCallBack newObject(Handle handle) { private static class OpForVoidCallBack extends OpBase { - static OpForVoidCallBack create(ByteBuf cmd, CompletableFuture callback) { + + static OpForVoidCallBack create(ByteBuf cmd, CompletableFuture callback, PulsarClientImpl client) { OpForVoidCallBack op = RECYCLER.get(); op.callback = callback; op.cmd = cmd; + op.backoff = new BackoffBuilder() + .setInitialTime(client.getConfiguration().getInitialBackoffIntervalNanos(), + TimeUnit.NANOSECONDS) + .setMax(client.getConfiguration().getMaxBackoffIntervalNanos() / 10, TimeUnit.NANOSECONDS) + .setMandatoryStop(0, TimeUnit.MILLISECONDS) + .create(); return op; } + private OpForVoidCallBack(Recycler.Handle recyclerHandle) { this.recyclerHandle = recyclerHandle; } @Override void recycle() { + this.backoff = null; + this.cmd = null; + this.callback = null; recyclerHandle.recycle(this); } @@ -432,9 +639,6 @@ private void onResponse(OpBase op) { } private boolean canSendRequest(CompletableFuture callback) { - if (!isValidHandlerState(callback)) { - return false; - } try { if (blockIfReachMaxPendingOps) { semaphore.acquire(); @@ -452,81 +656,89 @@ private 
boolean canSendRequest(CompletableFuture callback) { return true; } - private boolean isValidHandlerState(CompletableFuture callback) { + private boolean checkStateAndSendRequest(OpBase op) { switch (getState()) { case Ready: + ClientCnx cnx = cnx(); + if (cnx != null) { + op.cmd.retain(); + cnx.ctx().writeAndFlush(op.cmd, cnx().ctx().voidPromise()); + } else { + LOG.error("The cnx was null when the TC handler was ready", new NullPointerException()); + } return true; case Connecting: - callback.completeExceptionally( - new TransactionCoordinatorClientException.MetaStoreHandlerNotReadyException( - "Transaction meta store handler for tcId " - + transactionCoordinatorId - + " is connecting now, please try later.")); - return false; + return true; case Closing: case Closed: - callback.completeExceptionally( + op.callback.completeExceptionally( new TransactionCoordinatorClientException.MetaStoreHandlerNotReadyException( "Transaction meta store handler for tcId " + transactionCoordinatorId + " is closing or closed.")); + onResponse(op); return false; case Failed: case Uninitialized: - callback.completeExceptionally( + op.callback.completeExceptionally( new TransactionCoordinatorClientException.MetaStoreHandlerNotReadyException( "Transaction meta store handler for tcId " + transactionCoordinatorId + " not connected.")); + onResponse(op); return false; default: - callback.completeExceptionally( + op.callback.completeExceptionally( new TransactionCoordinatorClientException.MetaStoreHandlerNotReadyException( transactionCoordinatorId)); + onResponse(op); return false; } } @Override public void run(Timeout timeout) throws Exception { - if (timeout.isCancelled()) { - return; - } - long timeToWaitMs; - if (getState() == State.Closing || getState() == State.Closed) { - return; - } - RequestTime peeked = timeoutQueue.peek(); - while (peeked != null && peeked.creationTimeMs + client.getConfiguration().getOperationTimeoutMs() - - System.currentTimeMillis() <= 0) { - RequestTime 
lastPolled = timeoutQueue.poll(); - if (lastPolled != null) { - OpBase op = pendingRequests.remove(lastPolled.requestId); - if (op != null && !op.callback.isDone()) { - op.callback.completeExceptionally(new PulsarClientException.TimeoutException( - "Could not get response from transaction meta store within given timeout.")); - if (LOG.isDebugEnabled()) { - LOG.debug("Transaction coordinator request {} is timeout.", lastPolled.requestId); + internalPinnedExecutor.execute(() -> { + if (timeout.isCancelled()) { + return; + } + long timeToWaitMs; + if (getState() == State.Closing || getState() == State.Closed) { + return; + } + RequestTime peeked = timeoutQueue.peek(); + while (peeked != null && peeked.creationTimeMs + client.getConfiguration().getOperationTimeoutMs() + - System.currentTimeMillis() <= 0) { + RequestTime lastPolled = timeoutQueue.poll(); + if (lastPolled != null) { + OpBase op = pendingRequests.remove(lastPolled.requestId); + if (op != null && !op.callback.isDone()) { + op.callback.completeExceptionally(new PulsarClientException.TimeoutException( + "Could not get response from transaction meta store within given timeout.")); + if (LOG.isDebugEnabled()) { + LOG.debug("Transaction coordinator request {} is timeout.", lastPolled.requestId); + } + onResponse(op); } - onResponse(op); + } else { + break; } - } else { - break; + peeked = timeoutQueue.peek(); } - peeked = timeoutQueue.peek(); - } - if (peeked == null) { - timeToWaitMs = client.getConfiguration().getOperationTimeoutMs(); - } else { - long diff = (peeked.creationTimeMs + client.getConfiguration().getOperationTimeoutMs()) - System.currentTimeMillis(); - if (diff <= 0) { + if (peeked == null) { timeToWaitMs = client.getConfiguration().getOperationTimeoutMs(); } else { - timeToWaitMs = diff; + long diff = (peeked.creationTimeMs + client.getConfiguration().getOperationTimeoutMs()) + - System.currentTimeMillis(); + if (diff <= 0) { + timeToWaitMs = client.getConfiguration().getOperationTimeoutMs(); + 
} else { + timeToWaitMs = diff; + } } - } - requestTimeout = client.timer().newTimeout(this, timeToWaitMs, TimeUnit.MILLISECONDS); + requestTimeout = client.timer().newTimeout(this, timeToWaitMs, TimeUnit.MILLISECONDS); + }); } private ClientCnx cnx() { @@ -540,6 +752,12 @@ void connectionClosed(ClientCnx cnx) { @Override public void close() throws IOException { this.requestTimeout.cancel(); + this.setState(State.Closed); + } + + @VisibleForTesting + public State getConnectHandleState() { + return getState(); } @Override diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/UnAckedMessageTracker.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/UnAckedMessageTracker.java index db616f20450a4..020c925d0b061 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/UnAckedMessageTracker.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/UnAckedMessageTracker.java @@ -30,6 +30,7 @@ import java.io.Closeable; import java.util.ArrayDeque; +import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.Set; @@ -121,6 +122,10 @@ public UnAckedMessageTracker(PulsarClientImpl client, ConsumerBase consumerBa timeout = client.timer().newTimeout(new TimerTask() { @Override public void run(Timeout t) throws Exception { + if (t.isCancelled()) { + return; + } + Set messageIds = TL_MESSAGE_IDS_SET.get(); messageIds.clear(); @@ -130,7 +135,7 @@ public void run(Timeout t) throws Exception { if (!headPartition.isEmpty()) { log.warn("[{}] {} messages have timed-out", consumerBase, headPartition.size()); headPartition.forEach(messageId -> { - addChunkedMessageIdsAndRemoveFromSequnceMap(messageId, messageIds, consumerBase); + addChunkedMessageIdsAndRemoveFromSequenceMap(messageId, messageIds, consumerBase); messageIds.add(messageId); messageIdPartitionMap.remove(messageId); }); @@ -139,25 +144,27 @@ public void run(Timeout t) throws Exception { headPartition.clear(); 
timePartitions.addLast(headPartition); } finally { - writeLock.unlock(); - if (messageIds.size() > 0) { - consumerBase.onAckTimeoutSend(messageIds); - consumerBase.redeliverUnacknowledgedMessages(messageIds); + try { + timeout = client.timer().newTimeout(this, tickDurationInMs, TimeUnit.MILLISECONDS); + } finally { + writeLock.unlock(); + + if (!messageIds.isEmpty()) { + consumerBase.onAckTimeoutSend(messageIds); + consumerBase.redeliverUnacknowledgedMessages(messageIds); + } } - timeout = client.timer().newTimeout(this, tickDurationInMs, TimeUnit.MILLISECONDS); } } }, this.tickDurationInMs, TimeUnit.MILLISECONDS); } - public static void addChunkedMessageIdsAndRemoveFromSequnceMap(MessageId messageId, Set messageIds, - ConsumerBase consumerBase) { + public static void addChunkedMessageIdsAndRemoveFromSequenceMap(MessageId messageId, Set messageIds, + ConsumerBase consumerBase) { if (messageId instanceof MessageIdImpl) { MessageIdImpl[] chunkedMsgIds = consumerBase.unAckedChunkedMessageIdSequenceMap.get((MessageIdImpl) messageId); if (chunkedMsgIds != null && chunkedMsgIds.length > 0) { - for (MessageIdImpl msgId : chunkedMsgIds) { - messageIds.add(msgId); - } + Collections.addAll(messageIds, chunkedMsgIds); } consumerBase.unAckedChunkedMessageIdSequenceMap.remove((MessageIdImpl) messageId); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ZeroQueueConsumerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ZeroQueueConsumerImpl.java index 42361b67caf47..6375e37a89cc9 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ZeroQueueConsumerImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ZeroQueueConsumerImpl.java @@ -54,7 +54,7 @@ public ZeroQueueConsumerImpl(PulsarClientImpl client, String topic, ConsumerConf CompletableFuture> subscribeFuture, MessageId startMessageId, Schema schema, ConsumerInterceptors interceptors, boolean createTopicIfDoesNotExist) { - super(client, topic, conf, 
executorProvider, partitionIndex, hasParentConsumer, subscribeFuture, + super(client, topic, conf, executorProvider, partitionIndex, hasParentConsumer, false, subscribeFuture, startMessageId, 0 /* startMessageRollbackDurationInSec */, schema, interceptors, createTopicIfDoesNotExist); } @@ -174,7 +174,7 @@ private void triggerZeroQueueSizeListener(final Message message) { } @Override - protected void triggerListener() { + protected void tryTriggerListener() { // Ignore since it was already triggered in the triggerZeroQueueSizeListener() call } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/AuthenticationFactoryOAuth2.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/AuthenticationFactoryOAuth2.java index 707fcaf99c6d3..cf567747567e4 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/AuthenticationFactoryOAuth2.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/AuthenticationFactoryOAuth2.java @@ -33,7 +33,7 @@ public final class AuthenticationFactoryOAuth2 { * * @param issuerUrl the issuer URL * @param credentialsUrl the credentials URL - * @param audience the audience identifier + * @param audience An optional field. The audience identifier used by some Identity Providers, like Auth0. * @return an Authentication object */ public static Authentication clientCredentials(URL issuerUrl, URL credentialsUrl, String audience) { @@ -45,9 +45,9 @@ public static Authentication clientCredentials(URL issuerUrl, URL credentialsUrl * * @param issuerUrl the issuer URL * @param credentialsUrl the credentials URL - * @param audience the audience identifier + * @param audience An optional field. The audience identifier used by some Identity Providers, like Auth0. * @param scope An optional field. The value of the scope parameter is expressed as a list of space-delimited, - * case-sensitive strings. The strings are defined by the authorization server. 
+ * case-sensitive strings. The strings are defined by the authorization server. * If the value contains multiple space-delimited strings, their order does not matter, * and each string adds an additional access range to the requested scope. * From here: https://datatracker.ietf.org/doc/html/rfc6749#section-4.4.2 diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/ClientCredentialsFlow.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/ClientCredentialsFlow.java index b011e85dde0f5..bf0c289c0df60 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/ClientCredentialsFlow.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/ClientCredentialsFlow.java @@ -118,10 +118,10 @@ public void close() throws Exception { */ public static ClientCredentialsFlow fromParameters(Map params) { URL issuerUrl = parseParameterUrl(params, CONFIG_PARAM_ISSUER_URL); - String audience = parseParameterString(params, CONFIG_PARAM_AUDIENCE); String privateKeyUrl = parseParameterString(params, CONFIG_PARAM_KEY_FILE); - // This is an optional parameter + // These are optional parameters, so we only perform a get String scope = params.get(CONFIG_PARAM_SCOPE); + String audience = params.get(CONFIG_PARAM_AUDIENCE); return ClientCredentialsFlow.builder() .issuerUrl(issuerUrl) .audience(audience) diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/README.md b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/README.md index 55ffe58cf0503..b8b1237aa153b 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/README.md +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/README.md @@ -46,7 +46,7 @@ The following parameters are supported: | `type` | Oauth 2.0 auth type. Optional. 
| default: `client_credentials` | | `issuerUrl` | URL of the provider which allows Pulsar to obtain an access token. Required. | `https://accounts.google.com` | | `privateKey` | URL to a JSON credentials file (in JSON format; see below). Required. | See "Supported Pattern Formats" | -| `audience` | An OAuth 2.0 "resource server" identifier for the Pulsar cluster. Required. | `https://broker.example.com` | +| `audience` | An OAuth 2.0 "resource server" identifier for the Pulsar cluster. Required by some Identity Providers. Optional for client. | `https://broker.example.com` | ### Supported Pattern Formats of `privateKey` The `privateKey` parameter supports the following three pattern formats, and contains client Credentials: @@ -88,7 +88,7 @@ curl --request POST \ In which, - `issuerUrl` parameter in this plugin is mapped to `--url https://dev-kt-aa9ne.us.auth0.com` - `privateKey` file parameter in this plugin should at least contains fields `client_id` and `client_secret`. -- `audience` parameter in this plugin is mapped to `"audience":"https://dev-kt-aa9ne.us.auth0.com/api/v2/"` +- `audience` parameter in this plugin is mapped to `"audience":"https://dev-kt-aa9ne.us.auth0.com/api/v2/"`. This field is only used by some identity providers. ## Pulsar Client Config You can use the provider with the following Pulsar clients. 
diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/protocol/DefaultMetadataResolver.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/protocol/DefaultMetadataResolver.java index 2c09113055ba0..7d6ca1e5efabd 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/protocol/DefaultMetadataResolver.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/protocol/DefaultMetadataResolver.java @@ -33,6 +33,9 @@ */ public class DefaultMetadataResolver implements MetadataResolver { + protected static final int DEFAULT_CONNECT_TIMEOUT_IN_SECONDS = 10; + protected static final int DEFAULT_READ_TIMEOUT_IN_SECONDS = 30; + private final URL metadataUrl; private final ObjectReader objectReader; private Duration connectTimeout; @@ -41,6 +44,9 @@ public class DefaultMetadataResolver implements MetadataResolver { public DefaultMetadataResolver(URL metadataUrl) { this.metadataUrl = metadataUrl; this.objectReader = new ObjectMapper().readerFor(Metadata.class); + // set a default timeout to ensure that this doesn't block + this.connectTimeout = Duration.ofSeconds(DEFAULT_CONNECT_TIMEOUT_IN_SECONDS); + this.readTimeout = Duration.ofSeconds(DEFAULT_READ_TIMEOUT_IN_SECONDS); } public DefaultMetadataResolver withConnectTimeout(Duration connectTimeout) { diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/protocol/TokenClient.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/protocol/TokenClient.java index f8667e8625a77..1028da5f3e6b3 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/protocol/TokenClient.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/auth/oauth2/protocol/TokenClient.java @@ -54,6 +54,7 @@ public TokenClient(URL tokenUrl) { TokenClient(URL tokenUrl, AsyncHttpClient httpClient) { if (httpClient == null) { DefaultAsyncHttpClientConfig.Builder confBuilder = 
new DefaultAsyncHttpClientConfig.Builder(); + confBuilder.setUseProxyProperties(true); confBuilder.setFollowRedirect(true); confBuilder.setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_IN_SECONDS * 1000); confBuilder.setReadTimeout(DEFAULT_READ_TIMEOUT_IN_SECONDS * 1000); @@ -73,10 +74,21 @@ public void close() throws Exception { /** * Constructing http request parameters. - * @param bodyMap List of parameters to be requested. + * @param req object with relevant request parameters * @return Generate the final request body from a map. */ - String buildClientCredentialsBody(Map bodyMap) { + String buildClientCredentialsBody(ClientCredentialsExchangeRequest req) { + Map bodyMap = new TreeMap<>(); + bodyMap.put("grant_type", "client_credentials"); + bodyMap.put("client_id", req.getClientId()); + bodyMap.put("client_secret", req.getClientSecret()); + // Only set audience and scope if they are non-empty. + if (!StringUtils.isBlank(req.getAudience())) { + bodyMap.put("audience", req.getAudience()); + } + if (!StringUtils.isBlank(req.getScope())) { + bodyMap.put("scope", req.getScope()); + } return bodyMap.entrySet().stream() .map(e -> { try { @@ -96,15 +108,7 @@ String buildClientCredentialsBody(Map bodyMap) { */ public TokenResult exchangeClientCredentials(ClientCredentialsExchangeRequest req) throws TokenExchangeException, IOException { - Map bodyMap = new TreeMap<>(); - bodyMap.put("grant_type", "client_credentials"); - bodyMap.put("client_id", req.getClientId()); - bodyMap.put("client_secret", req.getClientSecret()); - bodyMap.put("audience", req.getAudience()); - if (!StringUtils.isBlank(req.getScope())) { - bodyMap.put("scope", req.getScope()); - } - String body = buildClientCredentialsBody(bodyMap); + String body = buildClientCredentialsBody(req); try { diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ClientConfigurationData.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ClientConfigurationData.java index 
9765cc484cbf4..3044b2a4c3bd4 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ClientConfigurationData.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/conf/ClientConfigurationData.java @@ -254,6 +254,7 @@ public class ClientConfigurationData implements Serializable, Cloneable { name = "tlsTrustStorePassword", value = "Password of TLS TrustStore." ) + @Secret private String tlsTrustStorePassword = null; @ApiModelProperty( @@ -312,6 +313,7 @@ public class ClientConfigurationData implements Serializable, Cloneable { name = "socks5ProxyUsername", value = "Password of SOCKS5 proxy." ) + @Secret private String socks5ProxyPassword; public Authentication getAuthentication() { diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/AbstractSchema.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/AbstractSchema.java index 8cf7a05cc1719..33c2ed17836ac 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/AbstractSchema.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/AbstractSchema.java @@ -75,14 +75,13 @@ public Schema clone() { * @param schemaVersion the version * @return the schema at that specific version * @throws SchemaSerializationException in case of unknown schema version - * @throws NullPointerException in case of null schemaVersion + * @throws NullPointerException in case of null schemaVersion and supportSchemaVersioning is true */ public Schema atSchemaVersion(byte[] schemaVersion) throws SchemaSerializationException { - Objects.requireNonNull(schemaVersion); if (!supportSchemaVersioning()) { return this; - } else { - throw new SchemaSerializationException("Not implemented for " + this.getClass()); } + Objects.requireNonNull(schemaVersion); + throw new SchemaSerializationException("Not implemented for " + this.getClass()); } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/AvroSchema.java 
b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/AvroSchema.java index cc31b1970a2fc..8bb2ebeee0ae1 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/AvroSchema.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/AvroSchema.java @@ -91,9 +91,12 @@ public static AvroSchema of(SchemaDefinition schemaDefinition) { schemaDefinition.getSchemaWriterOpt().get(), parseSchemaInfo(schemaDefinition, SchemaType.AVRO)); } ClassLoader pojoClassLoader = null; - if (schemaDefinition.getPojo() != null) { + if (schemaDefinition.getClassLoader() != null) { + pojoClassLoader = schemaDefinition.getClassLoader(); + } else if (schemaDefinition.getPojo() != null) { pojoClassLoader = schemaDefinition.getPojo().getClassLoader(); } + return new AvroSchema<>(parseSchemaInfo(schemaDefinition, SchemaType.AVRO), pojoClassLoader); } @@ -106,12 +109,21 @@ public static AvroSchema of(Class pojo, Map properties } public static void addLogicalTypeConversions(ReflectData reflectData, boolean jsr310ConversionEnabled) { - reflectData.addLogicalTypeConversion(new Conversions.DecimalConversion()); + addLogicalTypeConversions(reflectData, jsr310ConversionEnabled, true); + } + + public static void addLogicalTypeConversions(ReflectData reflectData, boolean jsr310ConversionEnabled, + boolean decimalConversionEnabled) { + if (decimalConversionEnabled) { + reflectData.addLogicalTypeConversion(new Conversions.DecimalConversion()); + } reflectData.addLogicalTypeConversion(new TimeConversions.DateConversion()); reflectData.addLogicalTypeConversion(new TimeConversions.TimeMillisConversion()); reflectData.addLogicalTypeConversion(new TimeConversions.TimeMicrosConversion()); - reflectData.addLogicalTypeConversion(new TimeConversions.TimestampMicrosConversion()); + reflectData.addLogicalTypeConversion(new TimeConversions.LocalTimestampMillisConversion()); + reflectData.addLogicalTypeConversion(new TimeConversions.LocalTimestampMicrosConversion()); 
if (jsr310ConversionEnabled) { + // The conversion that is registered first is higher priority than the registered later. reflectData.addLogicalTypeConversion(new TimeConversions.TimestampMillisConversion()); } else { try { @@ -121,6 +133,7 @@ public static void addLogicalTypeConversions(ReflectData reflectData, boolean js // Skip if have not provide joda-time dependency. } } + reflectData.addLogicalTypeConversion(new TimeConversions.TimestampMicrosConversion()); reflectData.addLogicalTypeConversion(new Conversions.UUIDConversion()); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/ByteBufSchema.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/ByteBufSchema.java index ce68298be2b49..7665d96ee727d 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/ByteBufSchema.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/ByteBufSchema.java @@ -64,11 +64,7 @@ public ByteBuf decode(byte[] bytes) { @Override public ByteBuf decode(ByteBuf byteBuf) { - if (null == byteBuf) { - return null; - } else { - return byteBuf; - } + return byteBuf; } @Override diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/RecordSchemaBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/RecordSchemaBuilderImpl.java index 0fda7d52b0dbe..5854a80716f63 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/RecordSchemaBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/RecordSchemaBuilderImpl.java @@ -109,6 +109,7 @@ public SchemaInfo build(SchemaType schemaType) { name, baseSchema.toString().getBytes(UTF_8), schemaType, + System.currentTimeMillis(), properties ); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/SchemaDefinitionBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/SchemaDefinitionBuilderImpl.java index 
d929c019d194f..80e30f0d64cc4 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/SchemaDefinitionBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/SchemaDefinitionBuilderImpl.java @@ -42,6 +42,11 @@ public class SchemaDefinitionBuilderImpl implements SchemaDefinitionBuilder clazz; + /** + * the classLoader definition class. + */ + private ClassLoader classLoader; + /** * The flag of schema type always allow null * @@ -102,6 +107,12 @@ public SchemaDefinitionBuilder withPojo(Class clazz) { return this; } + @Override + public SchemaDefinitionBuilder withClassLoader(ClassLoader classLoader) { + this.classLoader = classLoader; + return this; + } + @Override public SchemaDefinitionBuilder withJsonDef(String jsonDef) { this.jsonDef = jsonDef; @@ -151,8 +162,8 @@ public SchemaDefinition build() { properties.put(ALWAYS_ALLOW_NULL, String.valueOf(this.alwaysAllowNull)); properties.put(JSR310_CONVERSION_ENABLED, String.valueOf(this.jsr310ConversionEnabled)); - return new SchemaDefinitionImpl(clazz, jsonDef, alwaysAllowNull, properties, supportSchemaVersioning, - jsr310ConversionEnabled, reader, writer); + return new SchemaDefinitionImpl(clazz, jsonDef, classLoader, + alwaysAllowNull, properties, supportSchemaVersioning, jsr310ConversionEnabled, reader, writer); } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/SchemaDefinitionImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/SchemaDefinitionImpl.java index 9b026e4dac7d5..1842ab50558b8 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/SchemaDefinitionImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/SchemaDefinitionImpl.java @@ -51,6 +51,8 @@ public class SchemaDefinitionImpl implements SchemaDefinition { private final String jsonDef; + private final ClassLoader classLoader; + private final boolean supportSchemaVersioning; private final boolean 
jsr310ConversionEnabled; @@ -59,12 +61,15 @@ public class SchemaDefinitionImpl implements SchemaDefinition { private final SchemaWriter writer; - public SchemaDefinitionImpl(Class pojo, String jsonDef, boolean alwaysAllowNull, Map properties, - boolean supportSchemaVersioning, boolean jsr310ConversionEnabled, SchemaReader reader, SchemaWriter writer) { + public SchemaDefinitionImpl(Class pojo, String jsonDef, ClassLoader classLoader, + boolean alwaysAllowNull, Map properties, + boolean supportSchemaVersioning, boolean jsr310ConversionEnabled, + SchemaReader reader, SchemaWriter writer) { this.alwaysAllowNull = alwaysAllowNull; this.properties = properties; this.jsonDef = jsonDef; this.pojo = pojo; + this.classLoader = classLoader; this.supportSchemaVersioning = supportSchemaVersioning; this.jsr310ConversionEnabled = jsr310ConversionEnabled; this.reader = reader; @@ -104,6 +109,11 @@ public Class getPojo() { return pojo; } + @Override + public ClassLoader getClassLoader() { + return this.classLoader; + } + @Override public boolean getSupportSchemaVersioning() { return supportSchemaVersioning; diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/generic/GenericJsonReader.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/generic/GenericJsonReader.java index f0b2c86508b3b..1a95e9be152da 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/generic/GenericJsonReader.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/generic/GenericJsonReader.java @@ -18,35 +18,31 @@ */ package org.apache.pulsar.client.impl.schema.generic; +import static java.nio.charset.StandardCharsets.UTF_8; +import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectReader; +import java.io.IOException; +import java.io.InputStream; +import java.util.List; import 
org.apache.pulsar.client.api.SchemaSerializationException; import org.apache.pulsar.client.api.schema.Field; import org.apache.pulsar.client.api.schema.GenericRecord; import org.apache.pulsar.client.api.schema.SchemaReader; - import org.apache.pulsar.common.schema.SchemaInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.io.InputStream; -import java.util.List; - -import static java.nio.charset.StandardCharsets.UTF_8; - public class GenericJsonReader implements SchemaReader { - private final ObjectMapper objectMapper; + private final ObjectReader objectReader; private final byte[] schemaVersion; private final List fields; private SchemaInfo schemaInfo; public GenericJsonReader(List fields, SchemaInfo schemaInfo){ - this.fields = fields; - this.schemaVersion = null; - this.objectMapper = new ObjectMapper(); - this.schemaInfo = schemaInfo; + this(null, fields, schemaInfo); } public GenericJsonReader(List fields){ @@ -58,16 +54,17 @@ public GenericJsonReader(byte[] schemaVersion, List fields){ } public GenericJsonReader(byte[] schemaVersion, List fields, SchemaInfo schemaInfo){ - this.objectMapper = new ObjectMapper(); this.fields = fields; this.schemaVersion = schemaVersion; this.schemaInfo = schemaInfo; + ObjectMapper objectMapper = new ObjectMapper(); + this.objectReader = objectMapper.reader().with(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS); } @Override public GenericJsonRecord read(byte[] bytes, int offset, int length) { try { - JsonNode jn = objectMapper.readTree(new String(bytes, offset, length, UTF_8)); + JsonNode jn = objectReader.readTree(new String(bytes, offset, length, UTF_8)); return new GenericJsonRecord(schemaVersion, fields, jn, schemaInfo); } catch (IOException ioe) { throw new SchemaSerializationException(ioe); @@ -77,7 +74,7 @@ public GenericJsonRecord read(byte[] bytes, int offset, int length) { @Override public GenericRecord read(InputStream inputStream) { try { - JsonNode jn = 
objectMapper.readTree(inputStream); + JsonNode jn = objectReader.readTree(inputStream); return new GenericJsonRecord(schemaVersion, fields, jn, schemaInfo); } catch (IOException ioe) { throw new SchemaSerializationException(ioe); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/util/SchemaUtil.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/util/SchemaUtil.java index 2d0c810d8f029..abf5208628daf 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/util/SchemaUtil.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/schema/util/SchemaUtil.java @@ -94,7 +94,7 @@ public static Schema extractAvroSchema(SchemaDefinition schemaDefinition, Class ReflectData reflectData = schemaDefinition.getAlwaysAllowNull() ? new ReflectData.AllowNull() : new ReflectData(); - AvroSchema.addLogicalTypeConversions(reflectData, schemaDefinition.isJsr310ConversionEnabled()); + AvroSchema.addLogicalTypeConversions(reflectData, schemaDefinition.isJsr310ConversionEnabled(), false); return reflectData.getSchema(pojo); } } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBufferHandler.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBufferHandler.java index 4843e6ebc41d3..332857c850d3c 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBufferHandler.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBufferHandler.java @@ -72,4 +72,8 @@ CompletableFuture endTxnOnSubscription(String topic, String subscription, * Release resources. 
*/ void close(); + + int getAvailableRequestCredits(); + + int getPendingRequestsCount(); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBuilderImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBuilderImpl.java index 84be46fb7b7e2..9878264ba49c3 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBuilderImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionBuilderImpl.java @@ -18,6 +18,8 @@ */ package org.apache.pulsar.client.impl.transaction; +import io.netty.util.Timeout; +import io.netty.util.TimerTask; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import lombok.extern.slf4j.Slf4j; @@ -67,8 +69,9 @@ public CompletableFuture build() { future.completeExceptionally(throwable); return; } - future.complete(new TransactionImpl(client, txnTimeout, - txnID.getLeastSigBits(), txnID.getMostSigBits())); + TransactionImpl transaction = new TransactionImpl(client, timeUnit.toMillis(txnTimeout), + txnID.getLeastSigBits(), txnID.getMostSigBits()); + future.complete(transaction); }); return future; } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionCoordinatorClientImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionCoordinatorClientImpl.java index 8db80545ad257..c320d13e166f0 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionCoordinatorClientImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionCoordinatorClientImpl.java @@ -52,7 +52,11 @@ public class TransactionCoordinatorClientImpl implements TransactionCoordinatorC private final PulsarClientImpl pulsarClient; private TransactionMetaStoreHandler[] handlers; - private ConcurrentLongHashMap handlerMap = new ConcurrentLongHashMap<>(16, 1); + 
private ConcurrentLongHashMap handlerMap = + ConcurrentLongHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); private final AtomicLong epoch = new AtomicLong(0); private static final AtomicReferenceFieldUpdater STATE_UPDATER = @@ -90,6 +94,7 @@ public CompletableFuture startAsync() { i, pulsarClient, getTCAssignTopicName(i), connectFuture); handlers[i] = handler; handlerMap.put(i, handler); + handler.start(); } } else { handlers = new TransactionMetaStoreHandler[1]; @@ -99,6 +104,7 @@ public CompletableFuture startAsync() { getTCAssignTopicName(-1), connectFuture); handlers[0] = handler; handlerMap.put(0, handler); + handler.start(); } STATE_UPDATER.set(TransactionCoordinatorClientImpl.this, State.READY); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionImpl.java index 60c7829b11f41..bba53318695d9 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionImpl.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionImpl.java @@ -18,6 +18,8 @@ */ package org.apache.pulsar.client.impl.transaction; +import io.netty.util.Timeout; +import io.netty.util.TimerTask; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -25,6 +27,8 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import com.google.common.collect.Lists; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.tuple.Pair; @@ -48,7 +52,7 @@ */ @Slf4j @Getter -public class TransactionImpl implements Transaction { +public class TransactionImpl implements Transaction , TimerTask { private final PulsarClientImpl client; private final long transactionTimeoutMs; @@ -63,6 +67,14 @@ 
public class TransactionImpl implements Transaction { private final ArrayList> sendFutureList; private final ArrayList> ackFutureList; private volatile State state; + private static final AtomicReferenceFieldUpdater STATE_UPDATE = + AtomicReferenceFieldUpdater.newUpdater(TransactionImpl.class, State.class, "state"); + private final Timeout timeout; + + @Override + public void run(Timeout timeout) throws Exception { + STATE_UPDATE.compareAndSet(this, State.OPEN, State.TIMEOUT); + } public enum State { OPEN, @@ -70,7 +82,8 @@ public enum State { ABORTING, COMMITTED, ABORTED, - ERROR + ERROR, + TIMEOUT } TransactionImpl(PulsarClientImpl client, @@ -89,11 +102,14 @@ public enum State { this.sendFutureList = new ArrayList<>(); this.ackFutureList = new ArrayList<>(); + this.timeout = client.getTimer().newTimeout(this, transactionTimeoutMs, TimeUnit.MILLISECONDS); + } // register the topics that will be modified by this transaction public CompletableFuture registerProducedTopic(String topic) { - return checkIfOpen().thenCompose(value -> { + CompletableFuture completableFuture = new CompletableFuture<>(); + if (checkIfOpen(completableFuture)) { synchronized (TransactionImpl.this) { // we need to issue the request to TC to register the produced topic return registerPartitionMap.compute(topic, (key, future) -> { @@ -106,7 +122,9 @@ public CompletableFuture registerProducedTopic(String topic) { } }); } - }); + } else { + return completableFuture; + } } public synchronized void registerSendOp(CompletableFuture sendFuture) { @@ -115,7 +133,8 @@ public synchronized void registerSendOp(CompletableFuture sendFuture) // register the topics that will be modified by this transaction public CompletableFuture registerAckedTopic(String topic, String subscription) { - return checkIfOpen().thenCompose(value -> { + CompletableFuture completableFuture = new CompletableFuture<>(); + if (checkIfOpen(completableFuture)) { synchronized (TransactionImpl.this) { // we need to issue the request to 
TC to register the acked topic return registerSubscriptionMap.compute(Pair.of(topic, subscription), (key, future) -> { @@ -128,7 +147,9 @@ public CompletableFuture registerAckedTopic(String topic, String subscript } }); } - }); + } else { + return completableFuture; + } } public synchronized void registerAckOp(CompletableFuture ackFuture) { @@ -144,7 +165,8 @@ public synchronized void registerCumulativeAckConsumer(ConsumerImpl consumer) @Override public CompletableFuture commit() { - return checkIfOpen().thenCompose((value) -> { + timeout.cancel(); + return checkIfOpenOrCommitting().thenCompose((value) -> { CompletableFuture commitFuture = new CompletableFuture<>(); this.state = State.COMMITTING; allOpComplete().whenComplete((v, e) -> { @@ -172,7 +194,8 @@ public CompletableFuture commit() { @Override public CompletableFuture abort() { - return checkIfOpen().thenCompose(value -> { + timeout.cancel(); + return checkIfOpenOrAborting().thenCompose(value -> { CompletableFuture abortFuture = new CompletableFuture<>(); this.state = State.ABORTING; allOpComplete().whenComplete((v, e) -> { @@ -213,16 +236,40 @@ public TxnID getTxnID() { return new TxnID(txnIdMostBits, txnIdLeastBits); } - private CompletableFuture checkIfOpen() { + public boolean checkIfOpen(CompletableFuture completableFuture) { if (state == State.OPEN) { + return true; + } else { + completableFuture + .completeExceptionally(new InvalidTxnStatusException( + new TxnID(txnIdMostBits, txnIdLeastBits).toString(), state.name(), State.OPEN.name())); + return false; + } + } + + private CompletableFuture checkIfOpenOrCommitting() { + if (state == State.OPEN || state == State.COMMITTING) { return CompletableFuture.completedFuture(null); } else { - return FutureUtil.failedFuture(new InvalidTxnStatusException("[" + txnIdMostBits + ":" - + txnIdLeastBits + "] with unexpected state : " - + state.name() + ", expect " + State.OPEN + " state!")); + return invalidTxnStatusFuture(); } } + private CompletableFuture 
checkIfOpenOrAborting() { + if (state == State.OPEN || state == State.ABORTING) { + return CompletableFuture.completedFuture(null); + } else { + return invalidTxnStatusFuture(); + } + } + + private CompletableFuture invalidTxnStatusFuture() { + return FutureUtil.failedFuture(new InvalidTxnStatusException("[" + txnIdMostBits + ":" + + txnIdLeastBits + "] with unexpected state : " + + state.name() + ", expect " + State.OPEN + " state!")); + } + + private CompletableFuture allOpComplete() { List> futureList = new ArrayList<>(); futureList.addAll(sendFutureList); diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/util/ExecutorProvider.java b/pulsar-client/src/main/java/org/apache/pulsar/client/util/ExecutorProvider.java index 1318d5665ae83..db11358057f78 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/util/ExecutorProvider.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/util/ExecutorProvider.java @@ -28,7 +28,6 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -43,7 +42,7 @@ public class ExecutorProvider { private final String poolName; private volatile boolean isShutdown; - private static class ExtendedThreadFactory extends DefaultThreadFactory { + protected static class ExtendedThreadFactory extends DefaultThreadFactory { @Getter private Thread thread; @@ -58,7 +57,6 @@ public Thread newThread(Runnable r) { } } - public ExecutorProvider(int numThreads, String poolName) { checkArgument(numThreads > 0); this.numThreads = numThreads; @@ -67,13 +65,17 @@ public ExecutorProvider(int numThreads, String poolName) { for (int i = 0; i < numThreads; i++) { ExtendedThreadFactory threadFactory = new ExtendedThreadFactory( poolName, Thread.currentThread().isDaemon()); - ScheduledExecutorService executor = 
Executors.newSingleThreadScheduledExecutor(threadFactory); + ExecutorService executor = createExecutor(threadFactory); executors.add(Pair.of(executor, threadFactory)); } isShutdown = false; this.poolName = poolName; } + protected ExecutorService createExecutor(ExtendedThreadFactory threadFactory) { + return Executors.newSingleThreadExecutor(threadFactory); + } + public ExecutorService getExecutor() { return executors.get((currentThread.getAndIncrement() & Integer.MAX_VALUE) % numThreads).getKey(); } diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/util/RetryUtil.java b/pulsar-client/src/main/java/org/apache/pulsar/client/util/RetryUtil.java index 084a5839157d5..b3ed2c398d8bf 100644 --- a/pulsar-client/src/main/java/org/apache/pulsar/client/util/RetryUtil.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/util/RetryUtil.java @@ -30,7 +30,7 @@ public class RetryUtil { private static final Logger log = LoggerFactory.getLogger(RetryUtil.class); - public static void retryAsynchronously(Supplier supplier, Backoff backoff, + public static void retryAsynchronously(Supplier> supplier, Backoff backoff, ScheduledExecutorService scheduledExecutorService, CompletableFuture callback) { if (backoff.getMax() <= 0) { @@ -43,26 +43,24 @@ public static void retryAsynchronously(Supplier supplier, Backoff backoff executeWithRetry(supplier, backoff, scheduledExecutorService, callback)); } - private static void executeWithRetry(Supplier supplier, Backoff backoff, + private static void executeWithRetry(Supplier> supplier, Backoff backoff, ScheduledExecutorService scheduledExecutorService, CompletableFuture callback) { - try { - T result = supplier.get(); - callback.complete(result); - } catch (Exception e) { - long next = backoff.next(); - boolean isMandatoryStop = backoff.isMandatoryStopMade(); - if (isMandatoryStop) { - callback.completeExceptionally(e); - } else { - if (log.isDebugEnabled()) { - log.debug("execute with retry fail, will retry in {} ms", 
next, e); + supplier.get().whenComplete((result, e) -> { + if (e != null) { + long next = backoff.next(); + boolean isMandatoryStop = backoff.isMandatoryStopMade(); + if (isMandatoryStop) { + callback.completeExceptionally(e); + } else { + log.warn("Execution with retry fail, because of {}, will retry in {} ms", e.getMessage(), next); + scheduledExecutorService.schedule(() -> + executeWithRetry(supplier, backoff, scheduledExecutorService, callback), + next, TimeUnit.MILLISECONDS); } - log.info("Because of {} , will retry in {} ms", e.getMessage(), next); - scheduledExecutorService.schedule(() -> - executeWithRetry(supplier, backoff, scheduledExecutorService, callback), - next, TimeUnit.MILLISECONDS); + return; } - } + callback.complete(result); + }); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/EndOfTransactionException.java b/pulsar-client/src/main/java/org/apache/pulsar/client/util/ScheduledExecutorProvider.java similarity index 61% rename from pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/EndOfTransactionException.java rename to pulsar-client/src/main/java/org/apache/pulsar/client/util/ScheduledExecutorProvider.java index 57fce9a0a6e37..887ae3bb7fff4 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/EndOfTransactionException.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/util/ScheduledExecutorProvider.java @@ -16,16 +16,21 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.pulsar.broker.transaction.buffer.exceptions; +package org.apache.pulsar.client.util; -/** - * Exception thrown when reaching end of a transaction. 
- */ -public class EndOfTransactionException extends TransactionBufferException { +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import lombok.extern.slf4j.Slf4j; - private static final long serialVersionUID = 0L; +@Slf4j +public class ScheduledExecutorProvider extends ExecutorProvider { + + public ScheduledExecutorProvider(int numThreads, String poolName) { + super(numThreads, poolName); + } - public EndOfTransactionException(String message) { - super(message); + @Override + protected ExecutorService createExecutor(ExtendedThreadFactory threadFactory) { + return Executors.newSingleThreadScheduledExecutor(threadFactory); } } diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionStatusException.java b/pulsar-client/src/main/java/org/apache/pulsar/client/util/WithSNISslEngineFactory.java similarity index 50% rename from pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionStatusException.java rename to pulsar-client/src/main/java/org/apache/pulsar/client/util/WithSNISslEngineFactory.java index 008f27f58a411..965a7f2aec328 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/TransactionStatusException.java +++ b/pulsar-client/src/main/java/org/apache/pulsar/client/util/WithSNISslEngineFactory.java @@ -16,22 +16,27 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.pulsar.broker.transaction.buffer.exceptions; +package org.apache.pulsar.client.util; -import org.apache.pulsar.client.api.transaction.TxnID; -import org.apache.pulsar.transaction.coordinator.proto.TxnStatus; +import java.util.Collections; +import javax.net.ssl.SNIHostName; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; +import org.asynchttpclient.AsyncHttpClientConfig; +import org.asynchttpclient.netty.ssl.DefaultSslEngineFactory; -/** - * Exceptions are thrown when operations are applied to a transaction which is not in expected txn status. - */ -public class TransactionStatusException extends TransactionBufferException { +public class WithSNISslEngineFactory extends DefaultSslEngineFactory { + private final String host; - private static final long serialVersionUID = 0L; + public WithSNISslEngineFactory(String host) { + this.host = host; + } - public TransactionStatusException(TxnID txnId, - TxnStatus expectedStatus, - TxnStatus actualStatus) { - super("Transaction `" + txnId + "` is not in an expected status `" + expectedStatus - + "`, but is in status `" + actualStatus + "`"); + @Override + protected void configureSslEngine(SSLEngine sslEngine, AsyncHttpClientConfig config) { + super.configureSslEngine(sslEngine, config); + SSLParameters params = sslEngine.getSSLParameters(); + params.setServerNames(Collections.singletonList(new SNIHostName(host))); + sslEngine.setSSLParameters(params); } } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/api/MessageRouterTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/api/MessageRouterTest.java index 48f5816e2d2d5..0c29ecb7fef8f 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/api/MessageRouterTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/api/MessageRouterTest.java @@ -32,7 +32,7 @@ */ public class MessageRouterTest { - private static class TestMessageRouter implements MessageRouter { + public static class 
TestMessageRouter implements MessageRouter { @Override public int choosePartition(Message msg) { @@ -43,7 +43,7 @@ public int choosePartition(Message msg) { @SuppressWarnings("deprecation") @Test public void testChoosePartition() { - MessageRouter router = spy(new TestMessageRouter()); + MessageRouter router = spy(TestMessageRouter.class); Message mockedMsg = mock(Message.class); TopicMetadata mockedMetadata = mock(TopicMetadata.class); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java index 9632a88793d20..d577f48357c89 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java @@ -22,22 +22,27 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; - import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; - +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.BitSet; import java.util.Collections; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; - +import org.apache.pulsar.client.api.MessageId; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; import org.apache.pulsar.client.impl.conf.ConsumerConfigurationData; import org.apache.pulsar.client.util.TimedCompletableFuture; import org.apache.pulsar.common.api.proto.CommandAck.AckType; +import 
org.apache.pulsar.common.util.collections.ConcurrentBitSetRecyclable; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.common.api.proto.ProtocolVersion; import org.testng.annotations.AfterClass; @@ -55,7 +60,8 @@ public class AcknowledgementsGroupingTrackerTest { public void setup() throws NoSuchFieldException, IllegalAccessException { eventLoopGroup = new NioEventLoopGroup(1); consumer = mock(ConsumerImpl.class); - consumer.unAckedChunkedMessageIdSequenceMap = new ConcurrentOpenHashMap<>(); + consumer.unAckedChunkedMessageIdSequenceMap = + ConcurrentOpenHashMap.newBuilder().build(); cnx = spy(new ClientCnxTest(new ClientConfigurationData(), new NioEventLoopGroup())); PulsarClientImpl client = mock(PulsarClientImpl.class); doReturn(client).when(consumer).getClient(); @@ -381,6 +387,36 @@ public void testBatchAckTrackerMultiAck(boolean isNeedReceipt) throws Exception tracker.close(); } + @Test + public void testDoIndividualBatchAckAsync() throws Exception{ + ConsumerConfigurationData conf = new ConsumerConfigurationData<>(); + AcknowledgmentsGroupingTracker tracker = new PersistentAcknowledgmentsGroupingTracker(consumer, conf, eventLoopGroup); + MessageId messageId1 = new BatchMessageIdImpl(5, 1, 0, 3, 10, BatchMessageAckerDisabled.INSTANCE); + BitSet bitSet = new BitSet(20); + for(int i = 0; i < 20; i ++) { + bitSet.set(i, true); + } + MessageId messageId2 = new BatchMessageIdImpl(3, 2, 0, 5, 20, BatchMessageAcker.newAcker(bitSet)); + Method doIndividualBatchAckAsync = PersistentAcknowledgmentsGroupingTracker.class + .getDeclaredMethod("doIndividualBatchAckAsync", BatchMessageIdImpl.class); + doIndividualBatchAckAsync.setAccessible(true); + doIndividualBatchAckAsync.invoke(tracker, messageId1); + doIndividualBatchAckAsync.invoke(tracker, messageId2); + Field pendingIndividualBatchIndexAcks = PersistentAcknowledgmentsGroupingTracker.class.getDeclaredField("pendingIndividualBatchIndexAcks"); + 
pendingIndividualBatchIndexAcks.setAccessible(true); + ConcurrentHashMap batchIndexAcks = + (ConcurrentHashMap) pendingIndividualBatchIndexAcks.get(tracker); + MessageIdImpl position1 = new MessageIdImpl(5, 1, 0); + MessageIdImpl position2 = new MessageIdImpl(3, 2, 0); + assertTrue(batchIndexAcks.containsKey(position1)); + assertNotNull(batchIndexAcks.get(position1)); + assertEquals(batchIndexAcks.get(position1).cardinality(), 9); + assertTrue(batchIndexAcks.containsKey(position2)); + assertNotNull(batchIndexAcks.get(position2)); + assertEquals(batchIndexAcks.get(position2).cardinality(), 19); + tracker.close(); + } + public class ClientCnxTest extends ClientCnx { public ClientCnxTest(ClientConfigurationData conf, EventLoopGroup eventLoopGroup) { diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java new file mode 100644 index 0000000000000..3d554871141e3 --- /dev/null +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/BatchMessageContainerImplTest.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.impl; + +import org.apache.bookkeeper.common.allocator.impl.ByteBufAllocatorBuilderImpl; +import org.apache.bookkeeper.common.allocator.impl.ByteBufAllocatorImpl; +import org.apache.pulsar.client.api.CompressionType; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.conf.ProducerConfigurationData; +import org.apache.pulsar.common.api.proto.MessageMetadata; +import org.mockito.Mockito; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.testng.IObjectFactory; +import org.testng.annotations.ObjectFactory; +import org.testng.annotations.Test; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; + +@PrepareForTest({ByteBufAllocatorImpl.class, ByteBufAllocatorBuilderImpl.class}) +@PowerMockIgnore({"javax.management.*", "javax.ws.*", "org.apache.logging.log4j.*"}) +public class BatchMessageContainerImplTest { + + @ObjectFactory + public IObjectFactory getObjectFactory() { + return new org.powermock.modules.testng.PowerMockObjectFactory(); + } + + @Test + public void recoveryAfterOom() throws Exception { + final ByteBufAllocatorImpl mockAllocator = PowerMockito.mock(ByteBufAllocatorImpl.class); + PowerMockito.whenNew(ByteBufAllocatorImpl.class).withAnyArguments().thenReturn(mockAllocator); + PowerMockito.when(mockAllocator.buffer(Mockito.anyInt(), Mockito.anyInt())).thenThrow(new OutOfMemoryError("test")).thenReturn(null); + final ProducerImpl producer = Mockito.mock(ProducerImpl.class); + final ProducerConfigurationData producerConfigurationData = new ProducerConfigurationData(); + producerConfigurationData.setCompressionType(CompressionType.NONE); + Mockito.when(producer.getConfiguration()).thenReturn(producerConfigurationData); + final BatchMessageContainerImpl batchMessageContainer = new BatchMessageContainerImpl(); + 
batchMessageContainer.setProducer(producer); + MessageMetadata messageMetadata1 = new MessageMetadata(); + messageMetadata1.setSequenceId(1L); + messageMetadata1.setProducerName("producer1"); + messageMetadata1.setPublishTime(System.currentTimeMillis()); + ByteBuffer payload1 = ByteBuffer.wrap("payload1".getBytes(StandardCharsets.UTF_8)); + final MessageImpl message1 = MessageImpl.create(messageMetadata1, payload1, Schema.BYTES, null); + batchMessageContainer.add(message1, null); + MessageMetadata messageMetadata2 = new MessageMetadata(); + messageMetadata2.setSequenceId(1L); + messageMetadata2.setProducerName("producer1"); + messageMetadata2.setPublishTime(System.currentTimeMillis()); + ByteBuffer payload2 = ByteBuffer.wrap("payload2".getBytes(StandardCharsets.UTF_8)); + final MessageImpl message2 = MessageImpl.create(messageMetadata2, payload2, Schema.BYTES, null); + // after oom, our add can self-healing, won't throw exception + batchMessageContainer.add(message2, null); + } + +} diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ClientCnxTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ClientCnxTest.java index 558c0bfa13f76..a33d338fa2249 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ClientCnxTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ClientCnxTest.java @@ -32,16 +32,20 @@ import io.netty.util.concurrent.DefaultThreadFactory; import java.lang.reflect.Field; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadFactory; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.PulsarClientException.BrokerMetadataException; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import org.apache.pulsar.common.api.proto.CommandCloseConsumer; +import 
org.apache.pulsar.common.api.proto.CommandCloseProducer; import org.apache.pulsar.common.api.proto.CommandError; import org.apache.pulsar.common.api.proto.ServerError; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.PulsarHandler; import org.apache.pulsar.common.util.netty.EventLoopUtil; +import org.awaitility.Awaitility; import org.testng.annotations.Test; public class ClientCnxTest { @@ -72,6 +76,113 @@ public void testClientCnxTimeout() throws Exception { eventLoop.shutdownGracefully(); } + @Test + public void testPendingLookupRequestSemaphore() throws Exception { + EventLoopGroup eventLoop = EventLoopUtil.newEventLoopGroup(1, false, new DefaultThreadFactory("testClientCnxTimeout")); + ClientConfigurationData conf = new ClientConfigurationData(); + conf.setOperationTimeoutMs(10_000); + conf.setKeepAliveIntervalSeconds(0); + ClientCnx cnx = new ClientCnx(conf, eventLoop); + + ChannelHandlerContext ctx = mock(ChannelHandlerContext.class); + Channel channel = mock(Channel.class); + when(ctx.channel()).thenReturn(channel); + ChannelFuture listenerFuture = mock(ChannelFuture.class); + when(listenerFuture.addListener(any())).thenReturn(listenerFuture); + when(ctx.writeAndFlush(any())).thenReturn(listenerFuture); + cnx.channelActive(ctx); + CountDownLatch countDownLatch = new CountDownLatch(1); + CompletableFuture completableFuture = new CompletableFuture<>(); + new Thread(() -> { + try { + Thread.sleep(1_000); + CompletableFuture future = + cnx.newLookup(null, 123); + countDownLatch.countDown(); + future.get(); + } catch (Exception e) { + completableFuture.complete(e); + } + }).start(); + countDownLatch.await(); + cnx.channelInactive(ctx); + assertTrue(completableFuture.get().getCause() instanceof PulsarClientException.ConnectException); + // wait for subsequent calls over + Awaitility.await().untilAsserted(() -> { + assertEquals(cnx.getPendingLookupRequestSemaphore().availablePermits(), conf.getConcurrentLookupRequest()); + 
}); + eventLoop.shutdownGracefully(); + } + + @Test + public void testPendingLookupRequestSemaphoreServiceNotReady() throws Exception { + EventLoopGroup eventLoop = EventLoopUtil.newEventLoopGroup(1, false, new DefaultThreadFactory("testClientCnxTimeout")); + ClientConfigurationData conf = new ClientConfigurationData(); + conf.setOperationTimeoutMs(10_000); + conf.setKeepAliveIntervalSeconds(0); + ClientCnx cnx = new ClientCnx(conf, eventLoop); + + ChannelHandlerContext ctx = mock(ChannelHandlerContext.class); + Channel channel = mock(Channel.class); + when(ctx.channel()).thenReturn(channel); + ChannelFuture listenerFuture = mock(ChannelFuture.class); + when(listenerFuture.addListener(any())).thenReturn(listenerFuture); + when(ctx.writeAndFlush(any())).thenReturn(listenerFuture); + cnx.channelActive(ctx); + cnx.state = ClientCnx.State.Ready; + CountDownLatch countDownLatch = new CountDownLatch(1); + CompletableFuture completableFuture = new CompletableFuture<>(); + new Thread(() -> { + try { + Thread.sleep(1_000); + CompletableFuture future = + cnx.newLookup(null, 123); + countDownLatch.countDown(); + future.get(); + } catch (Exception e) { + completableFuture.complete(e); + } + }).start(); + countDownLatch.await(); + CommandError commandError = new CommandError(); + commandError.setRequestId(123L); + commandError.setError(ServerError.ServiceNotReady); + commandError.setMessage("Service not ready"); + cnx.handleError(commandError); + assertTrue(completableFuture.get().getCause() instanceof PulsarClientException.LookupException); + // wait for subsequent calls over + Awaitility.await().untilAsserted(() -> { + assertEquals(cnx.getPendingLookupRequestSemaphore().availablePermits(), conf.getConcurrentLookupRequest()); + }); + eventLoop.shutdownGracefully(); + } + + @Test + public void testPendingWaitingLookupRequestSemaphore() throws Exception { + EventLoopGroup eventLoop = EventLoopUtil.newEventLoopGroup(1, false, new DefaultThreadFactory("testClientCnxTimeout")); + 
ClientConfigurationData conf = new ClientConfigurationData(); + conf.setOperationTimeoutMs(10_000); + conf.setKeepAliveIntervalSeconds(0); + ClientCnx cnx = new ClientCnx(conf, eventLoop); + + ChannelHandlerContext ctx = mock(ChannelHandlerContext.class); + Channel channel = mock(Channel.class); + when(ctx.channel()).thenReturn(channel); + ChannelFuture listenerFuture = mock(ChannelFuture.class); + when(listenerFuture.addListener(any())).thenReturn(listenerFuture); + when(ctx.writeAndFlush(any())).thenReturn(listenerFuture); + cnx.channelActive(ctx); + for (int i = 0; i < 5001; i++) { + cnx.newLookup(null, i); + } + cnx.channelInactive(ctx); + // wait for subsequent calls over + Awaitility.await().untilAsserted(() -> { + assertEquals(cnx.getPendingLookupRequestSemaphore().availablePermits(), conf.getConcurrentLookupRequest()); + }); + eventLoop.shutdownGracefully(); + } + @Test public void testReceiveErrorAtSendConnectFrameState() throws Exception { ThreadFactory threadFactory = new DefaultThreadFactory("testReceiveErrorAtSendConnectFrameState"); @@ -152,4 +263,40 @@ public void testGetLastMessageIdWithError() throws Exception { eventLoop.shutdownGracefully(); } + + @Test + public void testHandleCloseConsumer() { + ThreadFactory threadFactory = new DefaultThreadFactory("testHandleCloseConsumer"); + EventLoopGroup eventLoop = EventLoopUtil.newEventLoopGroup(1, false, threadFactory); + ClientConfigurationData conf = new ClientConfigurationData(); + ClientCnx cnx = new ClientCnx(conf, eventLoop); + + long consumerId = 1; + cnx.registerConsumer(consumerId, mock(ConsumerImpl.class)); + assertEquals(cnx.consumers.size(), 1); + + CommandCloseConsumer closeConsumer = new CommandCloseConsumer().setConsumerId(consumerId); + cnx.handleCloseConsumer(closeConsumer); + assertEquals(cnx.consumers.size(), 0); + + eventLoop.shutdownGracefully(); + } + + @Test + public void testHandleCloseProducer() { + ThreadFactory threadFactory = new 
DefaultThreadFactory("testHandleCloseProducer"); + EventLoopGroup eventLoop = EventLoopUtil.newEventLoopGroup(1, false, threadFactory); + ClientConfigurationData conf = new ClientConfigurationData(); + ClientCnx cnx = new ClientCnx(conf, eventLoop); + + long producerId = 1; + cnx.registerProducer(producerId, mock(ProducerImpl.class)); + assertEquals(cnx.producers.size(), 1); + + CommandCloseProducer closeProducerCmd = new CommandCloseProducer().setProducerId(producerId); + cnx.handleCloseProducer(closeProducerCmd); + assertEquals(cnx.producers.size(), 0); + + eventLoop.shutdownGracefully(); + } } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerBuilderImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerBuilderImplTest.java index 13d63baee62ee..d648ee75af78c 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerBuilderImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerBuilderImplTest.java @@ -18,8 +18,20 @@ */ package org.apache.pulsar.client.impl; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertNotNull; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.regex.Pattern; import org.apache.pulsar.client.api.BatchReceivePolicy; import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.DeadLetterPolicy; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionInitialPosition; @@ -28,18 +40,6 @@ import org.testng.annotations.BeforeTest; import org.testng.annotations.Test; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; 
-import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; -import java.util.regex.Pattern; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.testng.Assert.assertNotNull; - /** * Unit tests of {@link ConsumerBuilderImpl}. */ @@ -288,6 +288,21 @@ public void testConsumerBuilderImplWhenBatchReceivePolicyIsNotValid() { .build()); } + @Test(expectedExceptions = IllegalArgumentException.class) + public void testRedeliverCountOfDeadLetterPolicy() { + consumerBuilderImpl.deadLetterPolicy(DeadLetterPolicy.builder() + .maxRedeliverCount(0) + .deadLetterTopic("test-dead-letter-topic") + .retryLetterTopic("test-retry-letter-topic") + .build()); + } + + @Test + public void testNullDeadLetterPolicy() { + consumerBuilderImpl.deadLetterPolicy(null); + verify(consumerBuilderImpl.getConf()).setDeadLetterPolicy(null); + } + @Test public void testConsumerBuilderImplWhenNumericPropertiesAreValid() { consumerBuilderImpl.negativeAckRedeliveryDelay(1, TimeUnit.MILLISECONDS); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerImplTest.java index 37c9e0cdeb377..8a9e665f9143a 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ConsumerImplTest.java @@ -187,4 +187,18 @@ public void testBatchReceiveAsyncCanBeCancelled() { // then Assert.assertFalse(consumer.hasPendingBatchReceive()); } + + @Test + public void testClose() { + Exception checkException = null; + try { + if (consumer != null) { + consumer.negativeAcknowledge(new MessageIdImpl(-1, -1, -1)); + consumer.close(); + } + } catch (Exception e) { + checkException = e; + } + Assert.assertNull(checkException); + } } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/LastCumulativeAckTest.java 
b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/LastCumulativeAckTest.java new file mode 100644 index 0000000000000..102ccfc0e07a5 --- /dev/null +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/LastCumulativeAckTest.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.client.impl; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotSame; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertSame; +import static org.testng.Assert.assertTrue; +import org.apache.pulsar.common.util.collections.BitSetRecyclable; +import org.testng.annotations.Test; + +public class LastCumulativeAckTest { + + @Test + public void testUpdate() { + final LastCumulativeAck lastCumulativeAck = new LastCumulativeAck(); + assertFalse(lastCumulativeAck.isFlushRequired()); + assertEquals(lastCumulativeAck.getMessageId(), LastCumulativeAck.DEFAULT_MESSAGE_ID); + assertNull(lastCumulativeAck.getBitSetRecyclable()); + + final MessageIdImpl messageId1 = new MessageIdImpl(0L, 1L, 10); + final BitSetRecyclable bitSetRecyclable1 = BitSetRecyclable.create(); + bitSetRecyclable1.set(0, 3); + lastCumulativeAck.update(messageId1, bitSetRecyclable1); + assertTrue(lastCumulativeAck.isFlushRequired()); + assertSame(lastCumulativeAck.getMessageId(), messageId1); + assertSame(lastCumulativeAck.getBitSetRecyclable(), bitSetRecyclable1); + + final MessageIdImpl messageId2 = new MessageIdImpl(0L, 2L, 8); + lastCumulativeAck.update(messageId2, bitSetRecyclable1); + // bitSetRecyclable1 is not recycled + assertEquals(bitSetRecyclable1.toString(), "{0, 1, 2}"); + + final BitSetRecyclable bitSetRecyclable2 = BitSetRecyclable.create(); + bitSetRecyclable2.set(0, 2); + + // `update()` only accepts a newer message ID, so this call here has no side effect + lastCumulativeAck.update(messageId2, bitSetRecyclable2); + assertSame(lastCumulativeAck.getBitSetRecyclable(), bitSetRecyclable1); + + final MessageIdImpl messageId3 = new MessageIdImpl(0L, 3L, 9); + lastCumulativeAck.update(messageId3, bitSetRecyclable2); + // bitSetRecyclable1 is recycled because it's replaced in `update` + assertEquals(bitSetRecyclable1.toString(), "{}"); + 
assertSame(lastCumulativeAck.getMessageId(), messageId3); + assertSame(lastCumulativeAck.getBitSetRecyclable(), bitSetRecyclable2); + bitSetRecyclable2.recycle(); + } + + @Test + public void testFlush() { + final LastCumulativeAck lastCumulativeAck = new LastCumulativeAck(); + assertNull(lastCumulativeAck.flush()); + + final MessageIdImpl messageId = new MessageIdImpl(0L, 1L, 3); + final BitSetRecyclable bitSetRecyclable = BitSetRecyclable.create(); + bitSetRecyclable.set(0, 3); + lastCumulativeAck.update(messageId, bitSetRecyclable); + assertTrue(lastCumulativeAck.isFlushRequired()); + + final LastCumulativeAck lastCumulativeAckToFlush = lastCumulativeAck.flush(); + assertFalse(lastCumulativeAck.isFlushRequired()); + assertSame(lastCumulativeAckToFlush.getMessageId(), messageId); + assertNotSame(lastCumulativeAckToFlush.getBitSetRecyclable(), bitSetRecyclable); + assertEquals(lastCumulativeAckToFlush.getBitSetRecyclable(), bitSetRecyclable); + } + +} diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MessageTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MessageTest.java index 6d633e793d738..13cf4f6b78865 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MessageTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MessageTest.java @@ -22,8 +22,8 @@ import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; - import java.nio.ByteBuffer; +import java.util.Optional; import org.apache.pulsar.client.api.Message; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.api.proto.MessageMetadata; @@ -81,4 +81,15 @@ public void testTopicMessageImplNoReplicatedInfo() { assertFalse(topicMessage.isReplicated()); assertNull(topicMessage.getReplicatedFrom()); } + + @Test + public void testMessageImplGetReaderSchema() { + MessageMetadata builder = new MessageMetadata(); + builder.hasSchemaVersion(); + 
ByteBuffer payload = ByteBuffer.wrap(new byte[0]); + Message msg = MessageImpl.create(builder, payload, Schema.BYTES, null); + + Optional> readerSchema = msg.getReaderSchema(); + assertTrue(readerSchema.isPresent()); + } } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImplTest.java index 6af8914d6943d..fe8180694083c 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/MultiTopicsConsumerImplTest.java @@ -36,6 +36,7 @@ import org.apache.pulsar.client.util.ExecutorProvider; import org.apache.pulsar.common.partition.PartitionedTopicMetadata; import org.apache.pulsar.common.util.netty.EventLoopUtil; +import org.awaitility.Awaitility; import org.junit.After; import org.junit.Before; import org.testng.annotations.AfterMethod; @@ -123,7 +124,7 @@ public void testGetStats() throws Exception { // // Code under tests is using CompletableFutures. Theses may hang indefinitely if code is broken. // That's why a test timeout is defined. 
- @Test(timeOut = 5000) + @Test(timeOut = 10000) public void testParallelSubscribeAsync() throws Exception { String topicName = "parallel-subscribe-async-topic"; MultiTopicsConsumerImpl impl = createMultiTopicsConsumer(); @@ -165,7 +166,7 @@ public void testReceiveAsyncCanBeCancelled() { // given MultiTopicsConsumerImpl consumer = createMultiTopicsConsumer(); CompletableFuture> future = consumer.receiveAsync(); - assertTrue(consumer.hasNextPendingReceive()); + Awaitility.await().untilAsserted(() -> assertTrue(consumer.hasNextPendingReceive())); // when future.cancel(true); // then @@ -177,7 +178,7 @@ public void testBatchReceiveAsyncCanBeCancelled() { // given MultiTopicsConsumerImpl consumer = createMultiTopicsConsumer(); CompletableFuture> future = consumer.batchReceiveAsync(); - assertTrue(consumer.hasPendingBatchReceive()); + Awaitility.await().untilAsserted(() -> assertTrue(consumer.hasPendingBatchReceive())); // when future.cancel(true); // then diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PartitionedProducerImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PartitionedProducerImplTest.java index 1f9496bbd505a..ad2c992b34c86 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PartitionedProducerImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PartitionedProducerImplTest.java @@ -202,4 +202,37 @@ public void testGetStats() throws Exception { impl.getStats(); } + @Test + public void testGetStatsWithoutArriveUpdateInterval() throws Exception { + String topicName = "test-stats-without-arrive-interval"; + ClientConfigurationData conf = new ClientConfigurationData(); + conf.setServiceUrl("pulsar://localhost:6650"); + conf.setStatsIntervalSeconds(100); + + ThreadFactory threadFactory = + new DefaultThreadFactory("client-test-stats", Thread.currentThread().isDaemon()); + EventLoopGroup eventLoopGroup = EventLoopUtil + .newEventLoopGroup(conf.getNumIoThreads(), false, 
threadFactory); + + PulsarClientImpl clientImpl = new PulsarClientImpl(conf, eventLoopGroup); + + ProducerConfigurationData producerConfData = new ProducerConfigurationData(); + producerConfData.setMessageRoutingMode(MessageRoutingMode.CustomPartition); + producerConfData.setCustomMessageRouter(new CustomMessageRouter()); + + assertEquals(Long.parseLong("100"), clientImpl.getConfiguration().getStatsIntervalSeconds()); + + PartitionedProducerImpl impl = new PartitionedProducerImpl<>( + clientImpl, topicName, producerConfData, + 1, null, null, null); + + impl.getProducers().get(0).getStats().incrementSendFailed(); + ProducerStatsRecorderImpl stats = impl.getStats(); + assertEquals(stats.getTotalSendFailed(), 0); + // When close producer, the ProducerStatsRecorder will update stats immediately + impl.close(); + stats = impl.getStats(); + assertEquals(stats.getTotalSendFailed(), 1); + } + } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerImplTest.java new file mode 100644 index 0000000000000..4db3cd0843b92 --- /dev/null +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerImplTest.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.client.impl; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.withSettings; +import static org.testng.Assert.assertTrue; +import org.apache.pulsar.client.api.Schema; +import org.mockito.Mockito; +import org.testng.annotations.Test; + +public class ProducerImplTest { + @Test + public void testPopulateMessageSchema() { + MessageImpl msg = mock(MessageImpl.class); + when(msg.hasReplicateFrom()).thenReturn(true); + when(msg.getSchemaInternal()).thenReturn(mock(Schema.class)); + when(msg.getSchemaInfoForReplicator()).thenReturn(null); + ProducerImpl producer = mock(ProducerImpl.class, withSettings() + .defaultAnswer(Mockito.CALLS_REAL_METHODS)); + assertTrue(producer.populateMessageSchema(msg, null)); + verify(msg).setSchemaState(MessageImpl.SchemaState.Ready); + } + +} diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImplTest.java index d654158e9efa6..f6e7f284ce639 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/ProducerStatsRecorderImplTest.java @@ -54,4 +54,24 @@ public void testIncrementNumAcksReceived() throws Exception { Thread.sleep(1200); assertEquals(1000.0, recorder.getSendLatencyMillisMax(), 0.5); } + + @Test + public void testGetStatsAndCancelStatsTimeoutWithoutArriveUpdateInterval() { + ClientConfigurationData conf = new ClientConfigurationData(); + conf.setStatsIntervalSeconds(60); + PulsarClientImpl client = mock(PulsarClientImpl.class); + when(client.getConfiguration()).thenReturn(conf); + Timer timer = new HashedWheelTimer(); + 
when(client.timer()).thenReturn(timer); + ProducerImpl producer = mock(ProducerImpl.class); + when(producer.getTopic()).thenReturn("topic-test"); + when(producer.getProducerName()).thenReturn("producer-test"); + when(producer.getPendingQueueSize()).thenReturn(1); + ProducerConfigurationData producerConfigurationData = new ProducerConfigurationData(); + ProducerStatsRecorderImpl recorder = new ProducerStatsRecorderImpl(client, producerConfigurationData, producer); + long latencyNs = TimeUnit.SECONDS.toNanos(1); + recorder.incrementNumAcksReceived(latencyNs); + recorder.cancelStatsTimeout(); + assertEquals(1000.0, recorder.getSendLatencyMillisMax(), 0.5); + } } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PulsarClientImplTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PulsarClientImplTest.java index 3f1e667517ee9..386294e24b784 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PulsarClientImplTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/PulsarClientImplTest.java @@ -48,10 +48,12 @@ import java.util.concurrent.ThreadFactory; import java.util.regex.Pattern; +import lombok.Cleanup; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; import org.apache.pulsar.client.impl.conf.ConsumerConfigurationData; +import org.apache.pulsar.client.util.ExecutorProvider; import org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespace; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; @@ -217,4 +219,34 @@ public void testResourceCleanup() throws PulsarClientException { assertFalse(eventLoopGroup.isShutdown()); } } + + @Test + public void testInitializingWithExecutorProviders() throws PulsarClientException { + ClientConfigurationData conf = clientImpl.conf; + @Cleanup("shutdownNow") + ExecutorProvider 
executorProvider = new ExecutorProvider(2, "shared-executor"); + @Cleanup + PulsarClientImpl client2 = PulsarClientImpl.builder().conf(conf) + .internalExecutorProvider(executorProvider) + .externalExecutorProvider(executorProvider) + .build(); + @Cleanup + PulsarClientImpl client3 = PulsarClientImpl.builder().conf(conf) + .internalExecutorProvider(executorProvider) + .externalExecutorProvider(executorProvider) + .build(); + } + + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "Both externalExecutorProvider and internalExecutorProvider must be " + + "specified or unspecified.") + public void testBothExecutorProvidersMustBeSpecified() throws PulsarClientException { + ClientConfigurationData conf = clientImpl.conf; + @Cleanup("shutdownNow") + ExecutorProvider executorProvider = new ExecutorProvider(2, "shared-executor"); + @Cleanup + PulsarClientImpl client2 = PulsarClientImpl.builder().conf(conf) + .internalExecutorProvider(executorProvider) + .build(); + } } diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/auth/oauth2/AuthenticationOAuth2Test.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/auth/oauth2/AuthenticationOAuth2Test.java index ac14dd2aee105..3ae578c34845c 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/auth/oauth2/AuthenticationOAuth2Test.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/auth/oauth2/AuthenticationOAuth2Test.java @@ -86,6 +86,19 @@ public void testConfigure() throws Exception { params.put("privateKey", "data:base64,e30="); params.put("issuerUrl", "http://localhost"); params.put("audience", "http://localhost"); + params.put("scope", "http://localhost"); + ObjectMapper mapper = new ObjectMapper(); + String authParams = mapper.writeValueAsString(params); + this.auth.configure(authParams); + assertNotNull(this.auth.flow); + } + + @Test + public void testConfigureWithoutOptionalParams() throws Exception { + Map params 
= new HashMap<>(); + params.put("type", "client_credentials"); + params.put("privateKey", "data:base64,e30="); + params.put("issuerUrl", "http://localhost"); ObjectMapper mapper = new ObjectMapper(); String authParams = mapper.writeValueAsString(params); this.auth.configure(authParams); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/auth/oauth2/protocol/TokenClientTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/auth/oauth2/protocol/TokenClientTest.java index 1617359ad08a9..da70d6cd58510 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/auth/oauth2/protocol/TokenClientTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/auth/oauth2/protocol/TokenClientTest.java @@ -47,19 +47,13 @@ public void exchangeClientCredentialsSuccessByScopeTest() throws DefaultAsyncHttpClient defaultAsyncHttpClient = mock(DefaultAsyncHttpClient.class); URL url = new URL("http://localhost"); TokenClient tokenClient = new TokenClient(url, defaultAsyncHttpClient); - Map bodyMap = new TreeMap<>(); ClientCredentialsExchangeRequest request = ClientCredentialsExchangeRequest.builder() .audience("test-audience") .clientId("test-client-id") .clientSecret("test-client-secret") .scope("test-scope") .build(); - bodyMap.put("grant_type", "client_credentials"); - bodyMap.put("client_id", request.getClientId()); - bodyMap.put("client_secret", request.getClientSecret()); - bodyMap.put("audience", request.getAudience()); - bodyMap.put("scope", request.getScope()); - String body = tokenClient.buildClientCredentialsBody(bodyMap); + String body = tokenClient.buildClientCredentialsBody(request); BoundRequestBuilder boundRequestBuilder = mock(BoundRequestBuilder.class); Response response = mock(Response.class); ListenableFuture listenableFuture = mock(ListenableFuture.class); @@ -80,22 +74,16 @@ public void exchangeClientCredentialsSuccessByScopeTest() throws @Test @SuppressWarnings("unchecked") - public void 
exchangeClientCredentialsSuccessByNoScopeTest() throws + public void exchangeClientCredentialsSuccessWithoutOptionalClientCredentialsTest() throws IOException, TokenExchangeException, ExecutionException, InterruptedException { DefaultAsyncHttpClient defaultAsyncHttpClient = mock(DefaultAsyncHttpClient.class); URL url = new URL("http://localhost"); TokenClient tokenClient = new TokenClient(url, defaultAsyncHttpClient); - Map bodyMap = new TreeMap<>(); ClientCredentialsExchangeRequest request = ClientCredentialsExchangeRequest.builder() - .audience("test-audience") .clientId("test-client-id") .clientSecret("test-client-secret") .build(); - bodyMap.put("grant_type", "client_credentials"); - bodyMap.put("client_id", request.getClientId()); - bodyMap.put("client_secret", request.getClientSecret()); - bodyMap.put("audience", request.getAudience()); - String body = tokenClient.buildClientCredentialsBody(bodyMap); + String body = tokenClient.buildClientCredentialsBody(request); BoundRequestBuilder boundRequestBuilder = mock(BoundRequestBuilder.class); Response response = mock(Response.class); ListenableFuture listenableFuture = mock(ListenableFuture.class); diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/conf/ClientConfigurationDataTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/conf/ClientConfigurationDataTest.java new file mode 100644 index 0000000000000..c817ec996d480 --- /dev/null +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/conf/ClientConfigurationDataTest.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.pulsar.client.impl.conf; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.SerializationFeature; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; +import org.testng.Assert; +import org.testng.annotations.Test; + +/** + * Unit test {@link ClientConfigurationData}. + */ +public class ClientConfigurationDataTest { + + private final ObjectWriter w; + + { + ObjectMapper m = new ObjectMapper(); + m.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + w = m.writer(); + } + + + @Test + public void testDoNotPrintSensitiveInfo() throws JsonProcessingException { + ClientConfigurationData clientConfigurationData = new ClientConfigurationData(); + clientConfigurationData.setTlsTrustStorePassword("xxxx"); + clientConfigurationData.setSocks5ProxyPassword("yyyy"); + clientConfigurationData.setAuthentication(new AuthenticationToken("zzzz")); + String s = w.writeValueAsString(clientConfigurationData); + Assert.assertFalse(s.contains("xxxx")); + Assert.assertFalse(s.contains("yyyy")); + Assert.assertFalse(s.contains("zzzz")); + } + +} diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/AvroSchemaTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/AvroSchemaTest.java index 00cbbdd6ad201..ed2c8597ded09 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/AvroSchemaTest.java +++ 
b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/AvroSchemaTest.java @@ -32,17 +32,20 @@ import java.math.BigDecimal; import java.time.Instant; import java.time.LocalDate; +import java.time.LocalDateTime; import java.time.LocalTime; import java.time.temporal.ChronoUnit; import java.util.Arrays; import java.util.UUID; - +import lombok.AllArgsConstructor; import lombok.Data; +import lombok.NoArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.apache.avro.Schema; import org.apache.avro.SchemaValidationException; import org.apache.avro.SchemaValidator; import org.apache.avro.SchemaValidatorBuilder; +import org.apache.avro.data.TimeConversions; import org.apache.avro.io.BinaryEncoder; import org.apache.avro.io.BufferedBinaryEncoder; import org.apache.avro.reflect.AvroDefault; @@ -438,4 +441,133 @@ public void testAvroUUID() { assertEquals(pojo1.uid, pojo2.uid); } + static class MyBigDecimalPojo { + public BigDecimal value1; + @org.apache.avro.reflect.AvroSchema("{\n" + + " \"type\": \"bytes\",\n" + + " \"logicalType\": \"decimal\",\n" + + " \"precision\": 4,\n" + + " \"scale\": 2\n" + + "}") + public BigDecimal value2; + } + + @Test + public void testAvroBigDecimal() { + org.apache.pulsar.client.api.Schema schema = + org.apache.pulsar.client.api.Schema.AVRO(MyBigDecimalPojo.class); + MyBigDecimalPojo myBigDecimalPojo = new MyBigDecimalPojo(); + myBigDecimalPojo.value1 = new BigDecimal("10.21"); + myBigDecimalPojo.value2 = new BigDecimal("10.22"); + MyBigDecimalPojo pojo2 = schema.decode(schema.encode(myBigDecimalPojo)); + assertEquals(pojo2.value1, myBigDecimalPojo.value1); + assertEquals(pojo2.value2, myBigDecimalPojo.value2); + } + + + @Data + @AllArgsConstructor + @NoArgsConstructor + private static class TimestampPojo { + Instant value; + } + + @Test + public void testTimestampWithJsr310Conversion() { + AvroSchema schema = AvroSchema.of(TimestampPojo.class); + Assert.assertEquals( + 
schema.getAvroSchema().getFields().get(0).schema().getTypes().get(1).getLogicalType().getName(), + new TimeConversions.TimestampMicrosConversion().getLogicalTypeName()); + + AvroSchema schema2 = AvroSchema.of(SchemaDefinition.builder() + .withPojo(TimestampPojo.class).withJSR310ConversionEnabled(true).build()); + Assert.assertEquals( + schema2.getAvroSchema().getFields().get(0).schema().getTypes().get(1).getLogicalType().getName(), + new TimeConversions.TimestampMillisConversion().getLogicalTypeName()); + } + + @Test + public void testTimestampWithJsonDef(){ + AvroSchema schemaWithPojo = AvroSchema.of(SchemaDefinition.builder() + .withPojo(TimestampPojo.class) + .withJSR310ConversionEnabled(false).build()); + + TimestampPojo timestampPojo = new TimestampPojo(Instant.parse("2022-06-10T12:38:59.039084Z")); + byte[] encode = schemaWithPojo.encode(timestampPojo); + TimestampPojo decodeWithPojo = schemaWithPojo.decode(encode); + + Assert.assertEquals(decodeWithPojo, timestampPojo); + + String schemaDefinition = new String(schemaWithPojo.schemaInfo.getSchema()); + AvroSchema schemaWithJsonDef = AvroSchema.of(SchemaDefinition.builder() + .withJsonDef(schemaDefinition) + .withClassLoader(TimestampPojo.class.getClassLoader()) + .withJSR310ConversionEnabled(false).build()); + + TimestampPojo decodeWithJson = schemaWithJsonDef.decode(encode); + + Assert.assertEquals(decodeWithJson, decodeWithPojo); + Assert.assertEquals(Instant.class, decodeWithJson.getValue().getClass()); + + AvroSchema schemaWithJsonDefNoClassLoader = AvroSchema.of(SchemaDefinition.builder() + .withJsonDef(schemaDefinition) + .withJSR310ConversionEnabled(false).build()); + + TimestampPojo decodeWithJsonNoClassLoader = schemaWithJsonDefNoClassLoader.decode(encode); + Assert.assertNotEquals(decodeWithJsonNoClassLoader, decodeWithPojo); + Assert.assertNotEquals(Instant.class, decodeWithJsonNoClassLoader.getValue().getClass()); + } + + @Test + public void testTimestampWithJsonDefAndJSR310ConversionEnabled(){ + 
AvroSchema schemaWithPojo = AvroSchema.of(SchemaDefinition.builder() + .withPojo(TimestampPojo.class) + .withJSR310ConversionEnabled(true).build()); + + TimestampPojo timestampPojo = new TimestampPojo(Instant.parse("2022-06-10T12:38:59.039084Z")); + byte[] encode = schemaWithPojo.encode(timestampPojo); + TimestampPojo decodeWithPojo = schemaWithPojo.decode(encode); + + Assert.assertNotEquals(decodeWithPojo, timestampPojo); + + String schemaDefinition = new String(schemaWithPojo.schemaInfo.getSchema()); + AvroSchema schemaWithJsonDef = AvroSchema.of(SchemaDefinition.builder() + .withJsonDef(schemaDefinition) + .withClassLoader(TimestampPojo.class.getClassLoader()) + .withJSR310ConversionEnabled(true).build()); + + TimestampPojo decodeWithJson = schemaWithJsonDef.decode(encode); + + Assert.assertEquals(decodeWithJson, decodeWithPojo); + Assert.assertEquals(Instant.class, decodeWithJson.getValue().getClass()); + + AvroSchema schemaWithJsonDefNoClassLoader = AvroSchema.of(SchemaDefinition.builder() + .withJsonDef(schemaDefinition) + .withJSR310ConversionEnabled(true).build()); + + TimestampPojo decodeWithJsonNoClassLoader = schemaWithJsonDefNoClassLoader.decode(encode); + Assert.assertNotEquals(decodeWithJsonNoClassLoader, decodeWithPojo); + Assert.assertNotEquals(Instant.class, decodeWithJsonNoClassLoader.getValue().getClass()); + } + + @Data + @AllArgsConstructor + @NoArgsConstructor + private static class LocalDateTimePojo { + LocalDateTime value; + } + + @Test + public void testLocalDateTime() { + SchemaDefinition schemaDefinition = + SchemaDefinition.builder().withPojo(LocalDateTimePojo.class) + .withJSR310ConversionEnabled(true).build(); + + AvroSchema avroSchema = AvroSchema.of(schemaDefinition); + LocalDateTime now = LocalDateTime.now(); + byte[] bytes = avroSchema.encode(new LocalDateTimePojo(now)); + + LocalDateTimePojo pojo = avroSchema.decode(bytes); + assertEquals(pojo.getValue().truncatedTo(ChronoUnit.MILLIS), now.truncatedTo(ChronoUnit.MILLIS)); + } } 
diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/SchemaBuilderTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/SchemaBuilderTest.java index fa88e144a317f..a1530864c9269 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/SchemaBuilderTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/SchemaBuilderTest.java @@ -20,11 +20,15 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.testng.Assert.assertEquals; - import lombok.Data; import org.apache.avro.reflect.Nullable; import org.apache.pulsar.client.api.Schema; -import org.apache.pulsar.client.api.schema.*; +import org.apache.pulsar.client.api.schema.GenericRecord; +import org.apache.pulsar.client.api.schema.GenericRecordBuilder; +import org.apache.pulsar.client.api.schema.GenericSchema; +import org.apache.pulsar.client.api.schema.RecordSchemaBuilder; +import org.apache.pulsar.client.api.schema.SchemaBuilder; +import org.apache.pulsar.client.api.schema.SchemaDefinition; import org.apache.pulsar.client.impl.schema.reader.MultiVersionAvroReader; import org.apache.pulsar.common.schema.SchemaInfo; import org.apache.pulsar.common.schema.SchemaType; diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/SchemaInfoTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/SchemaInfoTest.java index f96e84e158345..7ed5406b76e94 100644 --- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/SchemaInfoTest.java +++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/schema/SchemaInfoTest.java @@ -18,6 +18,10 @@ */ package org.apache.pulsar.client.impl.schema; +import static org.testng.Assert.assertEquals; +import java.util.HashMap; +import java.util.Map; + import com.google.common.collect.Maps; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.common.schema.KeyValueEncodingType; @@ -26,11 +30,6 @@ import 
org.testng.annotations.DataProvider; import org.testng.annotations.Test; -import java.util.HashMap; -import java.util.Map; - -import static org.testng.Assert.assertEquals; - /** * Unit test {@link org.apache.pulsar.common.schema.SchemaInfo}. */ @@ -40,6 +39,7 @@ public class SchemaInfoTest { + " \"name\": \"INT32\",\n" + " \"schema\": \"\",\n" + " \"type\": \"INT32\",\n" + + " \"timestamp\": 0,\n" + " \"properties\": {}\n" + "}"; @@ -47,6 +47,7 @@ public class SchemaInfoTest { + " \"name\": \"String\",\n" + " \"schema\": \"\",\n" + " \"type\": \"STRING\",\n" + + " \"timestamp\": 0,\n" + " \"properties\": {}\n" + "}"; @@ -64,6 +65,7 @@ public class SchemaInfoTest { + " ]\n" + " },\n" + " \"type\": \"JSON\",\n" + + " \"timestamp\": 0,\n" + " \"properties\": {\n" + " \"__alwaysAllowNull\": \"true\",\n" + " \"__jsr310ConversionEnabled\": \"false\",\n" @@ -136,6 +138,7 @@ public class SchemaInfoTest { + " ]\n" + " },\n" + " \"type\": \"AVRO\",\n" + + " \"timestamp\": 0,\n" + " \"properties\": {\n" + " \"__alwaysAllowNull\": \"false\",\n" + " \"__jsr310ConversionEnabled\": \"false\",\n" @@ -211,6 +214,7 @@ public class SchemaInfoTest { + " ]\n" + " },\n" + " \"type\": \"AVRO\",\n" + + " \"timestamp\": 0,\n" + " \"properties\": {\n" + " \"__alwaysAllowNull\": \"false\",\n" + " \"__jsr310ConversionEnabled\": \"false\",\n" @@ -233,6 +237,7 @@ public class SchemaInfoTest { + " ]\n" + " },\n" + " \"type\": \"JSON\",\n" + + " \"timestamp\": 0,\n" + " \"properties\": {\n" + " \"__alwaysAllowNull\": \"true\",\n" + " \"__jsr310ConversionEnabled\": \"false\",\n" @@ -243,6 +248,7 @@ public class SchemaInfoTest { + " }\n" + " },\n" + " \"type\": \"KEY_VALUE\",\n" + + " \"timestamp\": 0,\n" + " \"properties\": {\n" + " \"key.schema.name\": \"\",\n" + " \"key.schema.properties\": \"{\\\"__alwaysAllowNull\\\":\\\"false\\\",\\\"__jsr310ConversionEnabled\\\":\\\"false\\\",\\\"foo1\\\":\\\"foo-value1\\\",\\\"foo2\\\":\\\"foo-value2\\\",\\\"foo3\\\":\\\"foo-value3\\\"}\",\n" @@ -289,7 
+295,7 @@ public static class SchemaInfoBuilderTest { @Test public void testUnsetProperties() { - final SchemaInfo schemaInfo = SchemaInfoImpl.builder() + final SchemaInfo schemaInfo = SchemaInfo.builder() .type(SchemaType.STRING) .schema(new byte[0]) .name("string") @@ -305,7 +311,7 @@ public void testUnsetProperties() { public void testSetProperties() { final Map map = Maps.newHashMap(); map.put("test", "value"); - final SchemaInfo schemaInfo = SchemaInfoImpl.builder() + final SchemaInfo schemaInfo = SchemaInfo.builder() .type(SchemaType.STRING) .schema(new byte[0]) .name("string") @@ -323,7 +329,7 @@ public void testNullPropertyValue() { final Map map = new HashMap<>(); map.put("key", null); - SchemaInfo si = SchemaInfoImpl.builder() + SchemaInfo si = SchemaInfo.builder() .name("INT32") .schema(new byte[0]) .type(SchemaType.INT32) diff --git a/pulsar-common/pom.xml b/pulsar-common/pom.xml index 225c260c4e234..01fe5091bdbed 100644 --- a/pulsar-common/pom.xml +++ b/pulsar-common/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. @@ -72,6 +72,11 @@ netty-handler
+ + io.netty + netty-resolver-dns + + io.netty netty-transport-native-epoll @@ -131,11 +136,6 @@ netty-codec-haproxy - - org.eclipse.jetty - jetty-util - - org.apache.commons commons-lang3 diff --git a/pulsar-common/src/main/java-templates/org/apache/pulsar/PulsarVersion.java b/pulsar-common/src/main/java-templates/org/apache/pulsar/PulsarVersion.java index 07f97cd611e2b..e1e57e1909d7e 100644 --- a/pulsar-common/src/main/java-templates/org/apache/pulsar/PulsarVersion.java +++ b/pulsar-common/src/main/java-templates/org/apache/pulsar/PulsarVersion.java @@ -82,6 +82,10 @@ public static String getGitSha() { } } + public static String getGitBranch() { + return "${git.branch}"; + } + public static String getBuildUser() { String email = "${git.build.user.email}"; String name = "${git.build.user.name}"; diff --git a/pulsar-common/src/main/java/org/apache/pulsar/client/api/url/URL.java b/pulsar-common/src/main/java/org/apache/pulsar/client/api/url/URL.java index b2037377d2f41..3286900ecb792 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/client/api/url/URL.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/client/api/url/URL.java @@ -42,6 +42,17 @@ public URL(String spec) } } + /** + * Creates java.net.URL with data protocol support. 
+ * + * @param spec the input URL as String + * @return java.net.URL instance + */ + public static final java.net.URL createURL(String spec) + throws MalformedURLException, URISyntaxException, InstantiationException, IllegalAccessException { + return new URL(spec).url; + } + public URLConnection openConnection() throws IOException { return this.url.openConnection(); } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/client/impl/schema/SchemaInfoImpl.java b/pulsar-common/src/main/java/org/apache/pulsar/client/impl/schema/SchemaInfoImpl.java index ca8b6ccf9bd28..d67dc5f29e1c8 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/client/impl/schema/SchemaInfoImpl.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/client/impl/schema/SchemaInfoImpl.java @@ -59,6 +59,11 @@ public class SchemaInfoImpl implements SchemaInfo { */ private SchemaType type; + /** + * The created time of schema. + */ + private long timestamp; + /** * Additional properties of the schema definition (implementation defined). */ diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/api/raw/RawMessage.java b/pulsar-common/src/main/java/org/apache/pulsar/common/api/raw/RawMessage.java index d093628ccd105..483b5a30a0abd 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/api/raw/RawMessage.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/api/raw/RawMessage.java @@ -121,4 +121,33 @@ public interface RawMessage { * @return true if the key is base64 encoded, false otherwise */ boolean hasBase64EncodedKey(); + + /** + * Get uuid of chunked message. + * + * @return uuid + */ + String getUUID(); + + /** + * Get chunkId of chunked message. + * + * @return chunkId + */ + int getChunkId(); + + /** + * Get chunk num of chunked message. + * + * @return chunk num + */ + int getNumChunksFromMsg(); + + /** + * Get chunk message total size in bytes. 
+ * + * @return chunked message total size in bytes + */ + int getTotalChunkMsgSize(); + } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/api/raw/RawMessageImpl.java b/pulsar-common/src/main/java/org/apache/pulsar/common/api/raw/RawMessageImpl.java index defc1b496b043..e3c1b4d064f18 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/api/raw/RawMessageImpl.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/api/raw/RawMessageImpl.java @@ -38,6 +38,7 @@ public class RawMessageImpl implements RawMessage { private ReferenceCountedMessageMetadata msgMetadata; private final SingleMessageMetadata singleMessageMetadata = new SingleMessageMetadata(); + private volatile boolean setSingleMessageMetadata; private ByteBuf payload; private static final Recycler RECYCLER = new Recycler() { @@ -58,6 +59,7 @@ public void release() { msgMetadata.release(); msgMetadata = null; singleMessageMetadata.clear(); + setSingleMessageMetadata = false; payload.release(); handle.recycle(this); @@ -73,6 +75,7 @@ public static RawMessage get(ReferenceCountedMessageMetadata msgMetadata, if (singleMessageMetadata != null) { msg.singleMessageMetadata.copyFrom(singleMessageMetadata); + msg.setSingleMessageMetadata = true; } msg.messageId.ledgerId = ledgerId; msg.messageId.entryId = entryId; @@ -81,9 +84,17 @@ public static RawMessage get(ReferenceCountedMessageMetadata msgMetadata, return msg; } + public RawMessage updatePayloadForChunkedMessage(ByteBuf chunkedTotalPayload) { + if (!msgMetadata.getMetadata().hasNumChunksFromMsg() || msgMetadata.getMetadata().getNumChunksFromMsg() <= 1) { + throw new RuntimeException("The update payload operation only support multi chunked messages."); + } + payload = chunkedTotalPayload; + return this; + } + @Override public Map getProperties() { - if (singleMessageMetadata != null && singleMessageMetadata.getPropertiesCount() > 0) { + if (setSingleMessageMetadata && singleMessageMetadata.getPropertiesCount() > 0) { 
return singleMessageMetadata.getPropertiesList().stream() .collect(Collectors.toMap(KeyValue::getKey, KeyValue::getValue, (oldValue,newValue) -> newValue)); @@ -112,7 +123,7 @@ public long getPublishTime() { @Override public long getEventTime() { - if (singleMessageMetadata != null && singleMessageMetadata.hasEventTime()) { + if (setSingleMessageMetadata && singleMessageMetadata.hasEventTime()) { return singleMessageMetadata.getEventTime(); } else if (msgMetadata.getMetadata().hasEventTime()) { return msgMetadata.getMetadata().getEventTime(); @@ -133,7 +144,7 @@ public String getProducerName() { @Override public Optional getKey() { - if (singleMessageMetadata != null && singleMessageMetadata.hasPartitionKey()) { + if (setSingleMessageMetadata && singleMessageMetadata.hasPartitionKey()) { return Optional.of(singleMessageMetadata.getPartitionKey()); } else if (msgMetadata.getMetadata().hasPartitionKey()){ return Optional.of(msgMetadata.getMetadata().getPartitionKey()); @@ -164,12 +175,48 @@ public Optional getKeyBytes() { @Override public boolean hasBase64EncodedKey() { - if (singleMessageMetadata != null) { + if (setSingleMessageMetadata) { return singleMessageMetadata.isPartitionKeyB64Encoded(); } return msgMetadata.getMetadata().isPartitionKeyB64Encoded(); } + @Override + public String getUUID() { + if (msgMetadata.getMetadata().hasUuid()) { + return msgMetadata.getMetadata().getUuid(); + } else { + return null; + } + } + + @Override + public int getChunkId() { + if (msgMetadata.getMetadata().hasChunkId()) { + return msgMetadata.getMetadata().getChunkId(); + } else { + return -1; + } + } + + @Override + public int getNumChunksFromMsg() { + if (msgMetadata.getMetadata().hasNumChunksFromMsg()) { + return msgMetadata.getMetadata().getNumChunksFromMsg(); + } else { + return -1; + } + } + + @Override + public int getTotalChunkMsgSize() { + if (msgMetadata.getMetadata().hasTotalChunkMsgSize()) { + return msgMetadata.getMetadata().getTotalChunkMsgSize(); + } else { + 
return -1; + } + } + public int getBatchSize() { return msgMetadata.getMetadata().getNumMessagesInBatch(); } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/events/EventsTopicNames.java b/pulsar-common/src/main/java/org/apache/pulsar/common/events/EventsTopicNames.java index 2aa9e122d63c5..f82c9ae8519a4 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/events/EventsTopicNames.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/events/EventsTopicNames.java @@ -49,6 +49,7 @@ public static boolean checkTopicIsEventsNames(TopicName topicName) { } public static boolean checkTopicIsTransactionCoordinatorAssign(TopicName topicName) { - return TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString().equals(topicName.toString()); + return topicName != null && topicName.toString() + .startsWith(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString()); } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/naming/TopicName.java b/pulsar-common/src/main/java/org/apache/pulsar/common/naming/TopicName.java index 2cb9f23153bf3..67bc92acff3d9 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/naming/TopicName.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/naming/TopicName.java @@ -63,6 +63,9 @@ public TopicName load(String name) throws Exception { public static final TopicName TRANSACTION_COORDINATOR_ASSIGN = TopicName.get(TopicDomain.persistent.value(), NamespaceName.SYSTEM_NAMESPACE, "transaction_coordinator_assign"); + public static final TopicName TRANSACTION_COORDINATOR_LOG = TopicName.get(TopicDomain.persistent.value(), + NamespaceName.SYSTEM_NAMESPACE, "__transaction_log_"); + public static TopicName get(String domain, NamespaceName namespaceName, String topic) { String name = domain + "://" + namespaceName.toString() + '/' + topic; return TopicName.get(name); @@ -87,6 +90,14 @@ public static TopicName get(String topic) { } } + public static TopicName getPartitionedTopicName(String topic) { 
+ TopicName topicName = TopicName.get(topic); + if (topicName.isPartitioned()) { + return TopicName.get(topicName.getPartitionedTopicName()); + } + return topicName; + } + public static boolean isValid(String topic) { try { get(topic); diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/nar/FileUtils.java b/pulsar-common/src/main/java/org/apache/pulsar/common/nar/FileUtils.java index cc677302ed802..0bfdb806165e9 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/nar/FileUtils.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/nar/FileUtils.java @@ -30,6 +30,9 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.zip.ZipEntry; +import java.util.zip.ZipFile; +import lombok.extern.slf4j.Slf4j; import org.slf4j.Logger; /** @@ -37,6 +40,7 @@ * operations. * */ +@Slf4j public class FileUtils { public static final long MILLIS_BETWEEN_ATTEMPTS = 50L; @@ -221,5 +225,21 @@ public static void sleepQuietly(final long millis) { /* do nothing */ } } + + public static boolean mayBeANarArchive(File jarFile) { + try (ZipFile zipFile = new ZipFile(jarFile);) { + ZipEntry entry = zipFile.getEntry("META-INF/bundled-dependencies"); + if (entry == null || !entry.isDirectory()) { + log.info("Jar file {} does not contain META-INF/bundled-dependencies, it is not a NAR file", jarFile); + return false; + } else { + log.info("Jar file {} contains META-INF/bundled-dependencies, it may be a NAR file", jarFile); + return true; + } + } catch (IOException err) { + log.info("Cannot safely detect if {} is a NAR archive", jarFile, err); + return true; + } + } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/net/ServiceURI.java b/pulsar-common/src/main/java/org/apache/pulsar/common/net/ServiceURI.java index 16a070d41865c..f52afbcc886c4 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/net/ServiceURI.java +++ 
b/pulsar-common/src/main/java/org/apache/pulsar/common/net/ServiceURI.java @@ -219,7 +219,7 @@ private static int getServicePort(String serviceName, String[] serviceInfos) { case BINARY_SERVICE: if (serviceInfos.length == 0) { port = BINARY_PORT; - } else if (serviceInfos.length == 1 && serviceInfos[0].toLowerCase().equals(SSL_SERVICE)) { + } else if (serviceInfos.length == 1 && serviceInfos[0].equalsIgnoreCase(SSL_SERVICE)) { port = BINARY_TLS_PORT; } else { throw new IllegalArgumentException("Invalid pulsar service : " + serviceName + "+" diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/NamespaceIsolationDataImpl.java b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/NamespaceIsolationDataImpl.java index 5ca9d07c56558..4c2d99cc5d7bb 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/NamespaceIsolationDataImpl.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/NamespaceIsolationDataImpl.java @@ -59,7 +59,7 @@ public class NamespaceIsolationDataImpl implements NamespaceIsolationData { private List primary; @ApiModelProperty( - name = "primary", + name = "secondary", value = "The list of secondary brokers for serving the list of namespaces in this isolation policy" ) private List secondary; diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/OffloadPoliciesImpl.java b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/OffloadPoliciesImpl.java index e1da6a1c73741..3d920902d8071 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/OffloadPoliciesImpl.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/OffloadPoliciesImpl.java @@ -254,6 +254,11 @@ public void compatibleWithBrokerConfigFile(Properties properties) { setManagedLedgerOffloadDeletionLagInMillis( Long.parseLong(properties.getProperty(DELETION_LAG_NAME_IN_CONF_FILE))); } + + if 
(properties.containsKey("managedLedgerDataReadPriority")) { + setManagedLedgerOffloadedReadPriority( + OffloadedReadPriority.fromString(properties.getProperty("managedLedgerDataReadPriority"))); + } } public boolean driverSupported() { diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/ConsumerStatsImpl.java b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/ConsumerStatsImpl.java index 47f614889ca10..e4fd0958a34cc 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/ConsumerStatsImpl.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/ConsumerStatsImpl.java @@ -45,6 +45,11 @@ public class ConsumerStatsImpl implements ConsumerStats { /** Total rate of messages redelivered by this consumer (msg/s). */ public double msgRateRedeliver; + /** + * Total rate of message ack(msg/s). + */ + public double messageAckRate; + /** Total chunked messages dispatched. */ public double chunkedMessageRate; @@ -109,6 +114,7 @@ public ConsumerStatsImpl add(ConsumerStatsImpl stats) { this.msgRateRedeliver += stats.msgRateRedeliver; this.availablePermits += stats.availablePermits; this.unackedMessages += stats.unackedMessages; + this.messageAckRate += stats.messageAckRate; this.blockedConsumerOnUnackedMsgs = stats.blockedConsumerOnUnackedMsgs; this.readPositionWhenJoining = stats.readPositionWhenJoining; return this; diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/SubscriptionStatsImpl.java b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/SubscriptionStatsImpl.java index 78781ac32b46b..2a124b9e574fb 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/SubscriptionStatsImpl.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/SubscriptionStatsImpl.java @@ -46,6 +46,11 @@ public class SubscriptionStatsImpl implements SubscriptionStats { /** 
Total rate of messages redelivered on this subscription (msg/s). */ public double msgRateRedeliver; + /** + * Total rate of message ack(msg/s). + */ + public double messageAckRate; + /** Chunked message dispatch rate. */ public int chunkedMessageRate; diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/TopicStatsImpl.java b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/TopicStatsImpl.java index f3b3944b5fdc3..de016b0cf8f8a 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/TopicStatsImpl.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/stats/TopicStatsImpl.java @@ -76,6 +76,9 @@ public class TopicStatsImpl implements TopicStats { /** Get estimated total unconsumed or backlog size in bytes. */ public long backlogSize; + /** The number of times the publishing rate limit was triggered. */ + public long publishRateLimitedTimes; + /** Space used to store the offloaded messages for the topic/. 
*/ public long offloadedStorageSize; @@ -160,6 +163,7 @@ public void reset() { this.lastOffloadLedgerId = 0; this.lastOffloadFailureTimeStamp = 0; this.lastOffloadSuccessTimeStamp = 0; + this.publishRateLimitedTimes = 0L; this.compaction.reset(); } @@ -182,6 +186,7 @@ public TopicStatsImpl add(TopicStats ts) { this.averageMsgSize = newAverageMsgSize; this.storageSize += stats.storageSize; this.backlogSize += stats.backlogSize; + this.publishRateLimitedTimes += stats.publishRateLimitedTimes; this.offloadedStorageSize += stats.offloadedStorageSize; this.nonContiguousDeletedMessagesRanges += stats.nonContiguousDeletedMessagesRanges; this.nonContiguousDeletedMessagesRangesSerializedSize += stats.nonContiguousDeletedMessagesRangesSerializedSize; diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/Commands.java b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/Commands.java index 3c363e03bd85a..934e331bd6b47 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/Commands.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/Commands.java @@ -29,6 +29,7 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.Collections; import java.util.List; import java.util.Map; @@ -126,6 +127,10 @@ protected BaseCommand initialValue() throws Exception { } }; + // Return the last ProtocolVersion enum value + private static final int CURRENT_PROTOCOL_VERSION = + ProtocolVersion.values()[ProtocolVersion.values().length - 1].getValue(); + private static BaseCommand localCmd(BaseCommand.Type type) { return LOCAL_BASE_COMMAND.get() .clear() @@ -1727,6 +1732,9 @@ public static byte[] peekStickyKey(ByteBuf metadataAndPayload, String topic, Str if (metadata.hasOrderingKey()) { return metadata.getOrderingKey(); } else if (metadata.hasPartitionKey()) { + if (metadata.isPartitionKeyB64Encoded()) { + return Base64.getDecoder().decode(metadata.getPartitionKey()); + 
} return metadata.getPartitionKey().getBytes(StandardCharsets.UTF_8); } } catch (Throwable t) { @@ -1736,8 +1744,7 @@ public static byte[] peekStickyKey(ByteBuf metadataAndPayload, String topic, Str } public static int getCurrentProtocolVersion() { - // Return the last ProtocolVersion enum value - return ProtocolVersion.values()[ProtocolVersion.values().length - 1].getValue(); + return CURRENT_PROTOCOL_VERSION; } /** diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/PulsarHandler.java b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/PulsarHandler.java index 481517c794ab9..6a0b7bb1fa5f4 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/PulsarHandler.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/PulsarHandler.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.common.protocol; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import io.netty.channel.ChannelHandlerContext; import io.netty.util.concurrent.ScheduledFuture; import java.net.SocketAddress; @@ -36,7 +37,7 @@ public abstract class PulsarHandler extends PulsarDecoder { protected SocketAddress remoteAddress; private int remoteEndpointProtocolVersion = ProtocolVersion.v0.getValue(); private final long keepAliveIntervalSeconds; - private boolean waitingForPingResponse = false; + private volatile boolean waitingForPingResponse = false; private ScheduledFuture keepAliveTask; public int getRemoteEndpointProtocolVersion() { @@ -65,8 +66,9 @@ public void channelActive(ChannelHandlerContext ctx) throws Exception { log.debug("[{}] Scheduling keep-alive task every {} s", ctx.channel(), keepAliveIntervalSeconds); } if (keepAliveIntervalSeconds > 0) { - this.keepAliveTask = ctx.executor().scheduleAtFixedRate(this::handleKeepAliveTimeout, - keepAliveIntervalSeconds, keepAliveIntervalSeconds, TimeUnit.SECONDS); + this.keepAliveTask = ctx.executor() + 
.scheduleAtFixedRate(catchingAndLoggingThrowables(this::handleKeepAliveTimeout), + keepAliveIntervalSeconds, keepAliveIntervalSeconds, TimeUnit.SECONDS); } } @@ -81,7 +83,14 @@ final protected void handlePing(CommandPing ping) { if (log.isDebugEnabled()) { log.debug("[{}] Replying back to ping message", ctx.channel()); } - ctx.writeAndFlush(Commands.newPong()); + ctx.writeAndFlush(Commands.newPong()) + .addListener(future -> { + if (!future.isSuccess()) { + log.warn("[{}] Forcing connection to close since cannot send a pong message.", + ctx.channel(), future.cause()); + ctx.close(); + } + }); } @Override @@ -108,7 +117,14 @@ private void handleKeepAliveTimeout() { log.debug("[{}] Sending ping message", ctx.channel()); } waitingForPingResponse = true; - ctx.writeAndFlush(Commands.newPing()); + ctx.writeAndFlush(Commands.newPing()) + .addListener(future -> { + if (!future.isSuccess()) { + log.warn("[{}] Forcing connection to close since cannot send a ping message.", + ctx.channel(), future.cause()); + ctx.close(); + } + }); } else { if (log.isDebugEnabled()) { log.debug("[{}] Peer doesn't support keep-alive", ctx.channel()); @@ -116,7 +132,7 @@ private void handleKeepAliveTimeout() { } } - protected void cancelKeepAliveTask() { + public void cancelKeepAliveTask() { if (keepAliveTask != null) { keepAliveTask.cancel(false); keepAliveTask = null; diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/schema/SchemaData.java b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/schema/SchemaData.java index d5b440589288e..5c00f06d9698c 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/schema/SchemaData.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/schema/SchemaData.java @@ -22,7 +22,6 @@ import java.util.Map; import lombok.Builder; import lombok.Data; -import org.apache.pulsar.client.impl.schema.SchemaInfoImpl; import org.apache.pulsar.common.schema.SchemaInfo; import 
org.apache.pulsar.common.schema.SchemaType; @@ -46,7 +45,7 @@ public class SchemaData { * @return the converted schema info. */ public SchemaInfo toSchemaInfo() { - return SchemaInfoImpl.builder() + return SchemaInfo.builder() .name("") .type(type) .schema(data) diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/schema/SchemaHash.java b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/schema/SchemaHash.java index 40220e6047a3b..8bbc18fbb703c 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/schema/SchemaHash.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/protocol/schema/SchemaHash.java @@ -54,7 +54,12 @@ public static SchemaHash of(SchemaData schemaData) { return of(schemaData.getData(), schemaData.getType()); } - private static SchemaHash of(byte[] schemaBytes, SchemaType schemaType) { + public static SchemaHash of(SchemaInfo schemaInfo) { + return of(schemaInfo == null ? new byte[0] : schemaInfo.getSchema(), + schemaInfo == null ? 
null : schemaInfo.getType()); + } + + public static SchemaHash of(byte[] schemaBytes, SchemaType schemaType) { return new SchemaHash(hashFunction.hashBytes(schemaBytes), schemaType); } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/stats/JvmMetrics.java b/pulsar-common/src/main/java/org/apache/pulsar/common/stats/JvmMetrics.java index 6780563959b3b..83b14060a99a2 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/stats/JvmMetrics.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/stats/JvmMetrics.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.common.stats; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import io.netty.buffer.PoolArenaMetric; @@ -96,7 +97,7 @@ private static String detectGCType() { public JvmMetrics(ScheduledExecutorService executor, String componentName, JvmGCMetricsLogger gcLogger) { this.gcLogger = gcLogger; if (executor != null) { - executor.scheduleAtFixedRate(gcLogger::refresh, 0, 1, TimeUnit.MINUTES); + executor.scheduleAtFixedRate(catchingAndLoggingThrowables(gcLogger::refresh), 0, 1, TimeUnit.MINUTES); } this.componentName = componentName; } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/tls/TlsHostnameVerifier.java b/pulsar-common/src/main/java/org/apache/pulsar/common/tls/TlsHostnameVerifier.java index 9a2964d123801..0735013ad7f64 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/tls/TlsHostnameVerifier.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/tls/TlsHostnameVerifier.java @@ -206,9 +206,7 @@ private static boolean matchIdentity(final String host, final String identity, if (strict) { final String remainder = host.substring( prefix.length(), host.length() - suffix.length()); - if (remainder.contains(".")) { - return false; - } + return !remainder.contains("."); } return true; } @@ -289,7 +287,7 @@ static List 
getSubjectAltNames(final X509Certificate cert) { if (type != null) { final Object o = entry.get(1); if (o instanceof String) { - result.add(new SubjectName((String) o, type.intValue())); + result.add(new SubjectName((String) o, type)); } else if (o instanceof byte[]) { // TODO ASN.1 DER encoded form } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/ClassLoaderUtils.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/ClassLoaderUtils.java index 0e1e1884aaa8e..69e4c6319c0a6 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/ClassLoaderUtils.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/ClassLoaderUtils.java @@ -18,16 +18,20 @@ */ package org.apache.pulsar.common.util; +import java.io.Closeable; import java.io.File; +import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; import java.net.URLClassLoader; import java.security.AccessController; import java.security.PrivilegedAction; +import lombok.extern.slf4j.Slf4j; /** * Helper methods wrt Classloading. */ +@Slf4j public class ClassLoaderUtils { /** * Load a jar. 
@@ -76,4 +80,14 @@ public static void implementsClass(String className, Class klass, ClassLoader String.format("%s does not implement %s", className, klass.getName())); } } + + public static void closeClassLoader(ClassLoader classLoader) { + if (classLoader instanceof Closeable) { + try { + ((Closeable) classLoader).close(); + } catch (IOException e) { + log.error("Error closing classloader {}", classLoader, e); + } + } + } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/DefaultSslContextBuilder.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/DefaultSslContextBuilder.java index b1e8a14ff95f6..2e67b02f90b1e 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/DefaultSslContextBuilder.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/DefaultSslContextBuilder.java @@ -18,11 +18,8 @@ */ package org.apache.pulsar.common.util; -import java.io.FileNotFoundException; -import java.io.IOException; import java.security.GeneralSecurityException; import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLException; @SuppressWarnings("checkstyle:JavadocType") public class DefaultSslContextBuilder extends SslContextAutoRefreshBuilder { @@ -31,23 +28,37 @@ public class DefaultSslContextBuilder extends SslContextAutoRefreshBuilder void update(Map properties, T obj) throws Ille f.setAccessible(true); String v = properties.get(f.getName()); if (!StringUtils.isBlank(v)) { - f.set(obj, value(v, f)); + f.set(obj, value(trim(v), f)); } else { setEmptyValue(v, f, obj); } @@ -212,7 +213,7 @@ public static void setEmptyValue(String strValue, Field field, T obj) if (field.getType().equals(List.class)) { field.set(obj, Lists.newArrayList()); } else if (field.getType().equals(Set.class)) { - field.set(obj, Sets.newHashSet()); + field.set(obj, new LinkedHashSet<>()); } else if (field.getType().equals(Optional.class)) { field.set(obj, Optional.empty()); } else { @@ -316,7 +317,7 @@ public static Float 
stringToFloat(String val) { public static List stringToList(String val, Class type) { String[] tokens = trim(val).split(","); return Arrays.stream(tokens).map(t -> { - return convert(t, type); + return convert(trim(t), type); }).collect(Collectors.toList()); } @@ -332,8 +333,8 @@ public static List stringToList(String val, Class type) { public static Set stringToSet(String val, Class type) { String[] tokens = trim(val).split(","); return Arrays.stream(tokens).map(t -> { - return convert(t, type); - }).collect(Collectors.toSet()); + return convert(trim(t), type); + }).collect(Collectors.toCollection(LinkedHashSet::new)); } private static Map stringToMap(String strValue, Class keyType, Class valueType) { @@ -343,7 +344,7 @@ private static Map stringToMap(String strValue, Class keyType, C String[] keyValue = trim(token).split("="); checkArgument(keyValue.length == 2, strValue + " map-value is not in correct format key1=value,key2=value2"); - map.put(convert(keyValue[0], keyType), convert(keyValue[1], valueType)); + map.put(convert(trim(keyValue[0]), keyType), convert(trim(keyValue[1]), valueType)); } return map; } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/FutureUtil.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/FutureUtil.java index 3d2fcae918f24..51c6087558ab8 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/FutureUtil.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/FutureUtil.java @@ -19,6 +19,7 @@ package org.apache.pulsar.common.util; import java.time.Duration; +import java.util.Collection; import java.util.List; import java.util.Optional; import java.util.concurrent.CompletableFuture; @@ -28,7 +29,9 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.function.Predicate; import java.util.function.Supplier; +import java.util.stream.Collectors; /** * This class is aimed at 
simplifying work with {@code CompletableFuture}. @@ -36,35 +39,104 @@ public class FutureUtil { /** - * Return a future that represents the completion of the futures in the provided list. + * Return a future that represents the completion of the futures in the provided List. + * This method with the List parameter is needed to keep compatibility with external + * applications that are compiled with Pulsar < 2.10.0. * * @param futures futures to wait for * @return a new CompletableFuture that is completed when all of the given CompletableFutures complete */ + @Deprecated public static CompletableFuture waitForAll(List> futures) { return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); } /** - * Return a future that represents the completion of any future in the provided list. + * Return a future that represents the completion of the futures in the provided Collection. + * + * @param futures futures to wait for + * @return a new CompletableFuture that is completed when all of the given CompletableFutures complete + */ + public static CompletableFuture waitForAll(Collection> futures) { + return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); + } + + /** + * Return a future that represents the completion of any future in the provided List. + * This method with the List parameter is needed to keep compatibility with external + * applications that are compiled with Pulsar < 2.10.0. * * @param futures futures to wait any * @return a new CompletableFuture that is completed when any of the given CompletableFutures complete */ + @Deprecated public static CompletableFuture waitForAny(List> futures) { return CompletableFuture.anyOf(futures.toArray(new CompletableFuture[0])); } + /** + * Return a future that represents the completion of any future in the provided Collection. 
+ * + * @param futures futures to wait any + * @return a new CompletableFuture that is completed when any of the given CompletableFutures complete + */ + public static CompletableFuture waitForAny(Collection> futures) { + return CompletableFuture.anyOf(futures.toArray(new CompletableFuture[0])); + } + + /** + * Return a future that represents the completion of any future that match the predicate in the provided Collection. + * + * @param futures futures to wait any + * @param tester if any future match the predicate + * @return a new CompletableFuture that is completed when any of the given CompletableFutures match the tester + */ + public static CompletableFuture> waitForAny(Collection> futures, + Predicate tester) { + return waitForAny(futures).thenCompose(v -> { + if (tester.test(v)) { + futures.forEach(f -> { + if (!f.isDone()) { + f.cancel(true); + } + }); + return CompletableFuture.completedFuture(Optional.of(v)); + } + Collection> doneFutures = futures.stream() + .filter(f -> f.isDone()) + .collect(Collectors.toList()); + futures.removeAll(doneFutures); + Optional value = doneFutures.stream() + .filter(f -> !f.isCompletedExceptionally()) + .map(CompletableFuture::join) + .filter(tester) + .findFirst(); + if (!value.isPresent()) { + if (futures.size() == 0) { + return CompletableFuture.completedFuture(Optional.empty()); + } + return waitForAny(futures, tester); + } + futures.forEach(f -> { + if (!f.isDone()) { + f.cancel(true); + } + }); + return CompletableFuture.completedFuture(Optional.of(value.get())); + }); + } + /** - * Return a future that represents the completion of the futures in the provided list. + * Return a future that represents the completion of the futures in the provided Collection. * The future will support {@link CompletableFuture#cancel(boolean)}. It will cancel * all unfinished futures when the future gets cancelled. 
* * @param futures futures to wait for * @return a new CompletableFuture that is completed when all of the given CompletableFutures complete */ - public static CompletableFuture waitForAllAndSupportCancel(List> futures) { + public static CompletableFuture waitForAllAndSupportCancel( + Collection> futures) { CompletableFuture[] futuresArray = futures.toArray(new CompletableFuture[0]); CompletableFuture combinedFuture = CompletableFuture.allOf(futuresArray); whenCancelledOrTimedOut(combinedFuture, () -> { @@ -187,4 +259,18 @@ public static Optional getException(CompletableFuture future) } return Optional.empty(); } + + /** + * Wrap throwable exception to CompletionException if that exception is not an instance of CompletionException. + * + * @param throwable Exception + * @return CompletionException + */ + public static CompletionException wrapToCompletionException(Throwable throwable) { + if (throwable instanceof CompletionException) { + return (CompletionException) throwable; + } else { + return new CompletionException(throwable); + } + } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/NettyClientSslContextRefresher.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/NettyClientSslContextRefresher.java index 560746df7f608..9e050b7058d8b 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/NettyClientSslContextRefresher.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/NettyClientSslContextRefresher.java @@ -19,10 +19,12 @@ package org.apache.pulsar.common.util; import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslProvider; import java.io.FileNotFoundException; import java.io.IOException; import java.security.GeneralSecurityException; import java.security.cert.X509Certificate; +import java.util.Set; import javax.net.ssl.SSLException; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.client.api.AuthenticationDataProvider; @@ -33,19 +35,32 @@ @Slf4j public class 
NettyClientSslContextRefresher extends SslContextAutoRefreshBuilder { private volatile SslContext sslNettyContext; - private boolean tlsAllowInsecureConnection; + private final boolean tlsAllowInsecureConnection; protected final FileModifiedTimeUpdater tlsTrustCertsFilePath; - private AuthenticationDataProvider authData; + protected final FileModifiedTimeUpdater tlsCertsFilePath; + protected final FileModifiedTimeUpdater tlsPrivateKeyFilePath; + private final AuthenticationDataProvider authData; + private final SslProvider sslProvider; + private final Set ciphers; + private final Set protocols; - public NettyClientSslContextRefresher(boolean allowInsecure, + public NettyClientSslContextRefresher(SslProvider sslProvider, boolean allowInsecure, String trustCertsFilePath, AuthenticationDataProvider authData, - long delayInSeconds) - throws IOException, GeneralSecurityException { + Set ciphers, + Set protocols, + long delayInSeconds) { super(delayInSeconds); this.tlsAllowInsecureConnection = allowInsecure; this.tlsTrustCertsFilePath = new FileModifiedTimeUpdater(trustCertsFilePath); this.authData = authData; + this.tlsCertsFilePath = new FileModifiedTimeUpdater( + authData != null ? authData.getTlsCerificateFilePath() : null); + this.tlsPrivateKeyFilePath = new FileModifiedTimeUpdater( + authData != null ? authData.getTlsPrivateKeyFilePath() : null); + this.sslProvider = sslProvider; + this.ciphers = ciphers; + this.protocols = protocols; } @Override @@ -53,15 +68,16 @@ public synchronized SslContext update() throws SSLException, FileNotFoundException, GeneralSecurityException, IOException { if (authData != null && authData.hasDataForTls()) { this.sslNettyContext = authData.getTlsTrustStoreStream() == null - ? 
SecurityUtility.createNettySslContextForClient(this.tlsAllowInsecureConnection, - tlsTrustCertsFilePath.getFileName(), (X509Certificate[]) authData.getTlsCertificates(), - authData.getTlsPrivateKey()) - : SecurityUtility.createNettySslContextForClient(this.tlsAllowInsecureConnection, - authData.getTlsTrustStoreStream(), (X509Certificate[]) authData.getTlsCertificates(), - authData.getTlsPrivateKey()); + ? SecurityUtility.createNettySslContextForClient(this.sslProvider, this.tlsAllowInsecureConnection, + tlsTrustCertsFilePath.getFileName(), (X509Certificate[]) authData.getTlsCertificates(), + authData.getTlsPrivateKey(), this.ciphers, this.protocols) + : SecurityUtility.createNettySslContextForClient(this.sslProvider, this.tlsAllowInsecureConnection, + authData.getTlsTrustStoreStream(), (X509Certificate[]) authData.getTlsCertificates(), + authData.getTlsPrivateKey(), this.ciphers, this.protocols); } else { - this.sslNettyContext = SecurityUtility.createNettySslContextForClient(this.tlsAllowInsecureConnection, - this.tlsTrustCertsFilePath.getFileName()); + this.sslNettyContext = + SecurityUtility.createNettySslContextForClient(this.sslProvider, this.tlsAllowInsecureConnection, + this.tlsTrustCertsFilePath.getFileName(), this.ciphers, this.protocols); } return this.sslNettyContext; } @@ -73,6 +89,8 @@ public SslContext getSslContext() { @Override public boolean needUpdate() { - return tlsTrustCertsFilePath.checkAndRefresh(); + return tlsTrustCertsFilePath.checkAndRefresh() || tlsCertsFilePath.checkAndRefresh() + || tlsPrivateKeyFilePath.checkAndRefresh(); + } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/NettyServerSslContextBuilder.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/NettyServerSslContextBuilder.java index 250e628f0def7..e9fbb1f5e3ecf 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/NettyServerSslContextBuilder.java +++ 
b/pulsar-common/src/main/java/org/apache/pulsar/common/util/NettyServerSslContextBuilder.java @@ -19,6 +19,7 @@ package org.apache.pulsar.common.util; import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslProvider; import java.io.FileNotFoundException; import java.io.IOException; import java.security.GeneralSecurityException; @@ -36,8 +37,10 @@ public class NettyServerSslContextBuilder extends SslContextAutoRefreshBuilder tlsCiphers; protected final Set tlsProtocols; protected final boolean tlsRequireTrustedClientCertOnConnect; + protected final SslProvider sslProvider; - public NettyServerSslContextBuilder(boolean allowInsecure, String trustCertsFilePath, String certificateFilePath, + public NettyServerSslContextBuilder(SslProvider sslProvider, boolean allowInsecure, String trustCertsFilePath, + String certificateFilePath, String keyFilePath, Set ciphers, Set protocols, boolean requireTrustedClientCertOnConnect, long delayInSeconds) { @@ -49,14 +52,17 @@ public NettyServerSslContextBuilder(boolean allowInsecure, String trustCertsFile this.tlsCiphers = ciphers; this.tlsProtocols = protocols; this.tlsRequireTrustedClientCertOnConnect = requireTrustedClientCertOnConnect; + this.sslProvider = sslProvider; } @Override public synchronized SslContext update() - throws SSLException, FileNotFoundException, GeneralSecurityException, IOException { - this.sslNettyContext = SecurityUtility.createNettySslContextForServer(tlsAllowInsecureConnection, - tlsTrustCertsFilePath.getFileName(), tlsCertificateFilePath.getFileName(), tlsKeyFilePath.getFileName(), - tlsCiphers, tlsProtocols, tlsRequireTrustedClientCertOnConnect); + throws SSLException, FileNotFoundException, GeneralSecurityException, IOException { + this.sslNettyContext = + SecurityUtility.createNettySslContextForServer(this.sslProvider, tlsAllowInsecureConnection, + tlsTrustCertsFilePath.getFileName(), tlsCertificateFilePath.getFileName(), + tlsKeyFilePath.getFileName(), + tlsCiphers, tlsProtocols, 
tlsRequireTrustedClientCertOnConnect); return this.sslNettyContext; } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/ObjectMapperFactory.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/ObjectMapperFactory.java index 94e1b7af4a278..ef2e4894a721f 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/ObjectMapperFactory.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/ObjectMapperFactory.java @@ -37,7 +37,6 @@ import org.apache.pulsar.common.policies.data.AutoSubscriptionCreationOverride; import org.apache.pulsar.common.policies.data.AutoTopicCreationOverride; import org.apache.pulsar.common.policies.data.BacklogQuota; -import org.apache.pulsar.common.policies.data.BacklogQuotaMixIn; import org.apache.pulsar.common.policies.data.BookieAffinityGroupData; import org.apache.pulsar.common.policies.data.BookieInfo; import org.apache.pulsar.common.policies.data.BookiesClusterInfo; @@ -192,7 +191,6 @@ private static void setAnnotationsModule(ObjectMapper mapper) { resolver.addMapping(AutoSubscriptionCreationOverride.class, AutoSubscriptionCreationOverrideImpl.class); // we use MixIn class to add jackson annotations - mapper.addMixIn(BacklogQuotaImpl.class, BacklogQuotaMixIn.class); mapper.addMixIn(ResourceQuota.class, ResourceQuotaMixIn.class); mapper.addMixIn(FunctionConfig.class, JsonIgnorePropertiesMixIn.class); mapper.addMixIn(FunctionState.class, JsonIgnorePropertiesMixIn.class); diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java index 6f2d899808434..8f02bcc0e5cf3 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java @@ -19,6 +19,7 @@ package org.apache.pulsar.common.util; import static com.google.common.base.Preconditions.checkArgument; +import static 
org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.base.MoreObjects; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; @@ -188,8 +189,7 @@ public synchronized boolean tryAcquire(long acquirePermit) { canAcquire = acquirePermit < 0 || acquiredPermits < this.permits; } else { // acquired-permits can't be larger than the rate - if (acquirePermit > this.permits) { - acquiredPermits = this.permits; + if (acquirePermit + acquiredPermits > this.permits) { return false; } @@ -256,7 +256,8 @@ public synchronized TimeUnit getRateTimeUnit() { } protected ScheduledFuture createTask() { - return executorService.scheduleAtFixedRate(this::renew, this.rateTime, this.rateTime, this.timeUnit); + return executorService.scheduleAtFixedRate(catchingAndLoggingThrowables(this::renew), this.rateTime, + this.rateTime, this.timeUnit); } synchronized void renew() { diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/RelativeTimeUtil.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/RelativeTimeUtil.java index 36e6adfd32303..454cfda2c20db 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/RelativeTimeUtil.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/RelativeTimeUtil.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.common.util; +import java.math.BigDecimal; import java.util.concurrent.TimeUnit; import lombok.experimental.UtilityClass; @@ -63,4 +64,15 @@ public static long parseRelativeTimeInSeconds(String relativeTime) { throw new IllegalArgumentException("Invalid time unit '" + lastChar + "'"); } } + + /** + * Convert nanoseconds to seconds and keep three decimal places. 
+ * @param ns + * @return seconds + */ + public static double nsToSeconds(long ns) { + double seconds = (double) ns / 1_000_000_000; + BigDecimal bd = new BigDecimal(seconds); + return bd.setScale(3, BigDecimal.ROUND_HALF_UP).doubleValue(); + } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/Runnables.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/Runnables.java new file mode 100644 index 0000000000000..b720c4fb6af77 --- /dev/null +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/Runnables.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.common.util; + +import java.util.concurrent.TimeUnit; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public final class Runnables { + private static final Logger LOGGER = LoggerFactory.getLogger(Runnables.class); + + private Runnables() {} + + /** + * Wraps a Runnable so that throwables are caught and logged when a Runnable is run. 
+ * + * The main usecase for this method is to be used in {@link java.util.concurrent.ScheduledExecutorService#scheduleAtFixedRate(Runnable, long, long, TimeUnit)} + * calls to ensure that the scheduled task doesn't get cancelled as a result of an uncaught exception. + * + * @param runnable The runnable to wrap + * @return a wrapped Runnable + */ + public static Runnable catchingAndLoggingThrowables(Runnable runnable) { + return new CatchingAndLoggingRunnable(runnable); + } + + private static final class CatchingAndLoggingRunnable implements Runnable { + private final Runnable runnable; + + private CatchingAndLoggingRunnable(Runnable runnable) { + this.runnable = runnable; + } + + @Override + public void run() { + try { + runnable.run(); + } catch (Throwable t) { + LOGGER.error("Unexpected throwable caught", t); + } + } + } +} \ No newline at end of file diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/SecurityUtility.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/SecurityUtility.java index ec55f5349110c..a6d900d32bcc5 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/SecurityUtility.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/SecurityUtility.java @@ -21,6 +21,8 @@ import io.netty.handler.ssl.ClientAuth; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslHandler; +import io.netty.handler.ssl.SslProvider; import io.netty.handler.ssl.util.InsecureTrustManagerFactory; import java.io.BufferedReader; import java.io.File; @@ -56,7 +58,9 @@ import javax.net.ssl.KeyManager; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLException; +import javax.net.ssl.SSLParameters; import javax.net.ssl.TrustManager; import javax.net.ssl.TrustManagerFactory; import lombok.extern.slf4j.Slf4j; @@ -64,7 +68,6 @@ import 
org.apache.pulsar.common.classification.InterfaceAudience; import org.apache.pulsar.common.classification.InterfaceStability; import org.apache.pulsar.common.tls.TlsHostnameVerifier; -import org.eclipse.jetty.util.ssl.SslContextFactory; /** * Helper class for the security domain. @@ -111,11 +114,21 @@ public static Provider getProvider() { return getBCProviderFromClassPath(); } catch (Exception e) { log.warn("Not able to get Bouncy Castle provider for both FIPS and Non-FIPS from class path:", e); - throw new RuntimeException(e); + return null; } } private static Provider loadConscryptProvider() { + Class conscryptClazz; + + try { + conscryptClazz = Class.forName("org.conscrypt.Conscrypt"); + conscryptClazz.getMethod("checkAvailability").invoke(null); + } catch (Throwable e) { + log.warn("Conscrypt isn't available. Using JDK default security provider.", e); + return null; + } + Provider provider; try { provider = (Provider) Class.forName(CONSCRYPT_PROVIDER_CLASS).getDeclaredConstructor().newInstance(); @@ -143,7 +156,6 @@ private static Provider loadConscryptProvider() { // contains the workaround. 
try { HostnameVerifier hostnameVerifier = new TlsHostnameVerifier(); - Class conscryptClazz = Class.forName("org.conscrypt.Conscrypt"); Object wrappedHostnameVerifier = conscryptClazz .getMethod("wrapHostnameVerifier", new Class[]{HostnameVerifier.class}).invoke(null, hostnameVerifier); @@ -187,23 +199,27 @@ public static Provider getBCProviderFromClassPath() throws Exception { return provider; } - public static SSLContext createSslContext(boolean allowInsecureConnection, Certificate[] trustCertificates) + public static SSLContext createSslContext(boolean allowInsecureConnection, Certificate[] trustCertificates, + String providerName) throws GeneralSecurityException { - return createSslContext(allowInsecureConnection, trustCertificates, (Certificate[]) null, (PrivateKey) null); + return createSslContext(allowInsecureConnection, trustCertificates, null, null, providerName); } - public static SslContext createNettySslContextForClient(boolean allowInsecureConnection, String trustCertsFilePath) + public static SslContext createNettySslContextForClient(SslProvider sslProvider, boolean allowInsecureConnection, + String trustCertsFilePath, Set ciphers, + Set protocols) throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { - return createNettySslContextForClient(allowInsecureConnection, trustCertsFilePath, (Certificate[]) null, - (PrivateKey) null); + return createNettySslContextForClient(sslProvider, allowInsecureConnection, trustCertsFilePath, + (Certificate[]) null, + (PrivateKey) null, ciphers, protocols); } public static SSLContext createSslContext(boolean allowInsecureConnection, String trustCertsFilePath, - String certFilePath, String keyFilePath) throws GeneralSecurityException { + String certFilePath, String keyFilePath, String providerName) throws GeneralSecurityException { X509Certificate[] trustCertificates = loadCertificatesFromPemFile(trustCertsFilePath); X509Certificate[] certificates = loadCertificatesFromPemFile(certFilePath); 
PrivateKey privateKey = loadPrivateKeyFromPemFile(keyFilePath); - return createSslContext(allowInsecureConnection, trustCertificates, certificates, privateKey); + return createSslContext(allowInsecureConnection, trustCertificates, certificates, privateKey, providerName); } /** @@ -221,62 +237,84 @@ public static SSLContext createSslContext(boolean allowInsecureConnection, Strin * @throws FileNotFoundException * @throws IOException */ - public static SslContext createAutoRefreshSslContextForClient(boolean allowInsecureConnection, - String trustCertsFilePath, String certFilePath, String keyFilePath, String sslContextAlgorithm, - int refreshDurationSec, ScheduledExecutorService executor) + public static SslContext createAutoRefreshSslContextForClient(SslProvider sslProvider, + boolean allowInsecureConnection, + String trustCertsFilePath, String certFilePath, + String keyFilePath, String sslContextAlgorithm, + int refreshDurationSec, + ScheduledExecutorService executor) throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { KeyManagerProxy keyManager = new KeyManagerProxy(certFilePath, keyFilePath, refreshDurationSec, executor); - SslContextBuilder sslContexBuilder = SslContextBuilder.forClient(); + SslContextBuilder sslContexBuilder = SslContextBuilder.forClient().sslProvider(sslProvider); sslContexBuilder.keyManager(keyManager); if (allowInsecureConnection) { sslContexBuilder.trustManager(InsecureTrustManagerFactory.INSTANCE); } else { - TrustManagerProxy trustManager = new TrustManagerProxy(trustCertsFilePath, refreshDurationSec, executor); - sslContexBuilder.trustManager(trustManager); + if (StringUtils.isNotBlank(trustCertsFilePath)) { + TrustManagerProxy trustManager = + new TrustManagerProxy(trustCertsFilePath, refreshDurationSec, executor); + sslContexBuilder.trustManager(trustManager); + } } return sslContexBuilder.build(); } - public static SslContext createNettySslContextForClient(boolean allowInsecureConnection, String 
trustCertsFilePath, - String certFilePath, String keyFilePath) + public static SslContext createNettySslContextForClient(SslProvider sslProvider, boolean allowInsecureConnection, + String trustCertsFilePath, + String certFilePath, String keyFilePath, + Set ciphers, + Set protocols) throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { X509Certificate[] certificates = loadCertificatesFromPemFile(certFilePath); PrivateKey privateKey = loadPrivateKeyFromPemFile(keyFilePath); - return createNettySslContextForClient(allowInsecureConnection, trustCertsFilePath, certificates, privateKey); + return createNettySslContextForClient(sslProvider, allowInsecureConnection, trustCertsFilePath, certificates, + privateKey, ciphers, protocols); } - public static SslContext createNettySslContextForClient(boolean allowInsecureConnection, String trustCertsFilePath, - Certificate[] certificates, PrivateKey privateKey) + public static SslContext createNettySslContextForClient(SslProvider sslProvider, boolean allowInsecureConnection, + String trustCertsFilePath, + Certificate[] certificates, PrivateKey privateKey, + Set ciphers, + Set protocols) throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { if (StringUtils.isNotBlank(trustCertsFilePath)) { try (FileInputStream trustCertsStream = new FileInputStream(trustCertsFilePath)) { - return createNettySslContextForClient(allowInsecureConnection, trustCertsStream, certificates, - privateKey); + return createNettySslContextForClient(sslProvider, allowInsecureConnection, trustCertsStream, + certificates, + privateKey, ciphers, protocols); } } else { - return createNettySslContextForClient(allowInsecureConnection, (InputStream) null, certificates, - privateKey); + return createNettySslContextForClient(sslProvider, allowInsecureConnection, (InputStream) null, + certificates, + privateKey, ciphers, protocols); } } - public static SslContext createNettySslContextForClient(boolean 
allowInsecureConnection, - InputStream trustCertsStream, Certificate[] certificates, PrivateKey privateKey) + public static SslContext createNettySslContextForClient(SslProvider sslProvider, boolean allowInsecureConnection, + InputStream trustCertsStream, Certificate[] certificates, + PrivateKey privateKey, Set ciphers, + Set protocols) throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { - SslContextBuilder builder = SslContextBuilder.forClient(); + SslContextBuilder builder = SslContextBuilder.forClient().sslProvider(sslProvider); setupTrustCerts(builder, allowInsecureConnection, trustCertsStream); setupKeyManager(builder, privateKey, (X509Certificate[]) certificates); + setupCiphers(builder, ciphers); + setupProtocols(builder, protocols); return builder.build(); } - public static SslContext createNettySslContextForServer(boolean allowInsecureConnection, String trustCertsFilePath, - String certFilePath, String keyFilePath, Set ciphers, Set protocols, - boolean requireTrustedClientCertOnConnect) + public static SslContext createNettySslContextForServer(SslProvider sslProvider, boolean allowInsecureConnection, + String trustCertsFilePath, + String certFilePath, String keyFilePath, + Set ciphers, Set protocols, + boolean requireTrustedClientCertOnConnect) throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { X509Certificate[] certificates = loadCertificatesFromPemFile(certFilePath); PrivateKey privateKey = loadPrivateKeyFromPemFile(keyFilePath); - SslContextBuilder builder = SslContextBuilder.forServer(privateKey, (X509Certificate[]) certificates); + SslContextBuilder builder = + SslContextBuilder.forServer(privateKey, (X509Certificate[]) certificates).sslProvider(sslProvider); setupCiphers(builder, ciphers); setupProtocols(builder, protocols); if (StringUtils.isNotBlank(trustCertsFilePath)) { @@ -292,18 +330,25 @@ public static SslContext createNettySslContextForServer(boolean allowInsecureCon } public 
static SSLContext createSslContext(boolean allowInsecureConnection, Certificate[] trustCertficates, - Certificate[] certificates, PrivateKey privateKey) throws GeneralSecurityException { + Certificate[] certificates, PrivateKey privateKey) + throws GeneralSecurityException { + return createSslContext(allowInsecureConnection, trustCertficates, certificates, privateKey, null); + } + + public static SSLContext createSslContext(boolean allowInsecureConnection, Certificate[] trustCertficates, + Certificate[] certificates, PrivateKey privateKey, String providerName) + throws GeneralSecurityException { KeyStoreHolder ksh = new KeyStoreHolder(); TrustManager[] trustManagers = null; KeyManager[] keyManagers = null; + Provider provider = resolveProvider(providerName); - trustManagers = setupTrustCerts(ksh, allowInsecureConnection, trustCertficates, CONSCRYPT_PROVIDER); + trustManagers = setupTrustCerts(ksh, allowInsecureConnection, trustCertficates, provider); keyManagers = setupKeyManager(ksh, privateKey, certificates); - SSLContext sslCtx = CONSCRYPT_PROVIDER != null ? SSLContext.getInstance("TLS", CONSCRYPT_PROVIDER) + SSLContext sslCtx = provider != null ? 
SSLContext.getInstance("TLS", provider) : SSLContext.getInstance("TLS"); sslCtx.init(keyManagers, trustManagers, new SecureRandom()); - sslCtx.getDefaultSSLParameters(); return sslCtx; } @@ -422,12 +467,12 @@ public static X509Certificate[] loadCertificatesFromPemStream(InputStream inStre } public static PrivateKey loadPrivateKeyFromPemFile(String keyFilePath) throws KeyManagementException { - PrivateKey privateKey = null; - if (keyFilePath == null || keyFilePath.isEmpty()) { - return privateKey; + return null; } + PrivateKey privateKey; + try (FileInputStream input = new FileInputStream(keyFilePath)) { privateKey = loadPrivateKeyFromPemStream(input); } catch (IOException e) { @@ -438,12 +483,12 @@ public static PrivateKey loadPrivateKeyFromPemFile(String keyFilePath) throws Ke } public static PrivateKey loadPrivateKeyFromPemStream(InputStream inStream) throws KeyManagementException { - PrivateKey privateKey = null; - if (inStream == null) { - return privateKey; + return null; } + PrivateKey privateKey; + try (BufferedReader reader = new BufferedReader(new InputStreamReader(inStream, StandardCharsets.UTF_8))) { if (inStream.markSupported()) { inStream.reset(); @@ -510,51 +555,23 @@ private static void setupClientAuthentication(SslContextBuilder builder, } } - public static SslContextFactory createSslContextFactory(boolean tlsAllowInsecureConnection, - String tlsTrustCertsFilePath, String tlsCertificateFilePath, String tlsKeyFilePath, - boolean tlsRequireTrustedClientCertOnConnect, boolean autoRefresh, long certRefreshInSec) - throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { - SslContextFactory sslCtxFactory = null; - if (autoRefresh) { - sslCtxFactory = new SslContextFactoryWithAutoRefresh(tlsAllowInsecureConnection, tlsTrustCertsFilePath, - tlsCertificateFilePath, tlsKeyFilePath, tlsRequireTrustedClientCertOnConnect, 0); - } else { - sslCtxFactory = new SslContextFactory(); - SSLContext sslCtx = 
createSslContext(tlsAllowInsecureConnection, tlsTrustCertsFilePath, - tlsCertificateFilePath, tlsKeyFilePath); - sslCtxFactory.setSslContext(sslCtx); - } - if (tlsRequireTrustedClientCertOnConnect) { - sslCtxFactory.setNeedClientAuth(true); - } else { - sslCtxFactory.setWantClientAuth(true); - } - sslCtxFactory.setTrustAll(true); - return sslCtxFactory; + public static void configureSSLHandler(SslHandler handler) { + SSLEngine sslEngine = handler.engine(); + SSLParameters sslParameters = sslEngine.getSSLParameters(); + sslParameters.setEndpointIdentificationAlgorithm("HTTPS"); + sslEngine.setSSLParameters(sslParameters); } - /** - * {@link SslContextFactory} that auto-refresh SSLContext. - */ - static class SslContextFactoryWithAutoRefresh extends SslContextFactory { - - private final DefaultSslContextBuilder sslCtxRefresher; - - public SslContextFactoryWithAutoRefresh(boolean tlsAllowInsecureConnection, String tlsTrustCertsFilePath, - String tlsCertificateFilePath, String tlsKeyFilePath, boolean tlsRequireTrustedClientCertOnConnect, - long certRefreshInSec) - throws SSLException, FileNotFoundException, GeneralSecurityException, IOException { - super(); - sslCtxRefresher = new DefaultSslContextBuilder(tlsAllowInsecureConnection, tlsTrustCertsFilePath, - tlsCertificateFilePath, tlsKeyFilePath, tlsRequireTrustedClientCertOnConnect, certRefreshInSec); - if (CONSCRYPT_PROVIDER != null) { - setProvider(CONSCRYPT_PROVIDER.getName()); - } + public static Provider resolveProvider(String providerName) throws NoSuchAlgorithmException { + Provider provider = null; + if (!StringUtils.isEmpty(providerName)) { + provider = Security.getProvider(providerName); } - @Override - public SSLContext getSslContext() { - return sslCtxRefresher.get(); + if (provider == null) { + provider = SSLContext.getDefault().getProvider(); } + + return provider; } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/SimpleTextOutputStream.java 
b/pulsar-common/src/main/java/org/apache/pulsar/common/util/SimpleTextOutputStream.java index 9fc4b347c854f..ecfbc10d482e1 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/SimpleTextOutputStream.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/SimpleTextOutputStream.java @@ -19,6 +19,7 @@ package org.apache.pulsar.common.util; import io.netty.buffer.ByteBuf; +import io.netty.util.CharsetUtil; /** * Format strings and numbers into a ByteBuf without any memory allocation. @@ -26,8 +27,8 @@ */ public class SimpleTextOutputStream { private final ByteBuf buffer; - private static final char[] hexChars = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', - 'f' }; + private static final char[] hexChars = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', + 'f'}; public SimpleTextOutputStream(ByteBuf buffer) { this.buffer = buffer; @@ -44,7 +45,7 @@ public SimpleTextOutputStream write(byte[] a, int offset, int len) { } public SimpleTextOutputStream write(char c) { - buffer.writeByte((byte) c); + write(String.valueOf(c)); return this; } @@ -52,11 +53,8 @@ public SimpleTextOutputStream write(String s) { if (s == null) { return this; } - int len = s.length(); - for (int i = 0; i < len; i++) { - buffer.writeByte((byte) s.charAt(i)); - } + buffer.writeCharSequence(s, CharsetUtil.UTF_8); return this; } @@ -131,4 +129,12 @@ public SimpleTextOutputStream write(double d) { write(r); return this; } + + public void write(ByteBuf byteBuf) { + buffer.writeBytes(byteBuf); + } + + public ByteBuf getBuffer() { + return buffer; + } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/TrustManagerProxy.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/TrustManagerProxy.java index 7edbbb47da197..64e9545fe0481 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/TrustManagerProxy.java +++ 
b/pulsar-common/src/main/java/org/apache/pulsar/common/util/TrustManagerProxy.java @@ -18,16 +18,14 @@ */ package org.apache.pulsar.common.util; -import java.io.FileInputStream; -import java.io.FileNotFoundException; +import io.netty.handler.ssl.SslContext; import java.io.IOException; -import java.io.InputStream; import java.net.Socket; +import java.security.KeyManagementException; import java.security.KeyStore; import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.security.cert.CertificateException; -import java.security.cert.CertificateFactory; import java.security.cert.X509Certificate; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -36,7 +34,6 @@ import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509ExtendedTrustManager; -import io.netty.handler.ssl.SslContext; import lombok.extern.slf4j.Slf4j; /** @@ -47,13 +44,13 @@ public class TrustManagerProxy extends X509ExtendedTrustManager { private volatile X509ExtendedTrustManager trustManager; - private FileModifiedTimeUpdater certFile; + private final FileModifiedTimeUpdater certFile; public TrustManagerProxy(String caCertFile, int refreshDurationSec, ScheduledExecutorService executor) { this.certFile = new FileModifiedTimeUpdater(caCertFile); try { updateTrustManager(); - } catch (IOException | CertificateException e) { + } catch (KeyManagementException | IOException | CertificateException e) { log.warn("Failed to load cert {}, {}", certFile, e.getMessage()); throw new IllegalArgumentException(e); } catch (NoSuchAlgorithmException | KeyStoreException e) { @@ -73,19 +70,18 @@ private void updateTrustManagerSafely() { } private void updateTrustManager() throws CertificateException, KeyStoreException, NoSuchAlgorithmException, - FileNotFoundException, IOException { - CertificateFactory factory = CertificateFactory.getInstance("X.509"); - try (InputStream inputStream = new FileInputStream(certFile.getFileName())) { 
- X509Certificate certificate = (X509Certificate) factory.generateCertificate(inputStream); + IOException, KeyManagementException { + KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType()); + keyStore.load(null); + X509Certificate[] certificates = SecurityUtility.loadCertificatesFromPemFile(certFile.getFileName()); + for (X509Certificate certificate : certificates) { String alias = certificate.getSubjectX500Principal().getName(); - KeyStore keyStore = KeyStore.getInstance("JKS"); - keyStore.load(null); keyStore.setCertificateEntry(alias, certificate); - final TrustManagerFactory trustManagerFactory = TrustManagerFactory - .getInstance(TrustManagerFactory.getDefaultAlgorithm()); - trustManagerFactory.init(keyStore); - trustManager = (X509ExtendedTrustManager) trustManagerFactory.getTrustManagers()[0]; } + final TrustManagerFactory trustManagerFactory = TrustManagerFactory + .getInstance(TrustManagerFactory.getDefaultAlgorithm()); + trustManagerFactory.init(keyStore); + trustManager = (X509ExtendedTrustManager) trustManagerFactory.getTrustManagers()[0]; } @Override diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java index cd285221bc862..6f2794468c464 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java @@ -44,33 +44,112 @@ public class ConcurrentLongHashMap { private static final Object EmptyValue = null; private static final Object DeletedValue = new Object(); - private static final float MapFillFactor = 0.66f; - private static final int DefaultExpectedItems = 256; private static final int DefaultConcurrencyLevel = 16; + private static final float DefaultMapFillFactor = 0.66f; + private static final float DefaultMapIdleFactor = 0.15f; + + 
private static final float DefaultExpandFactor = 2; + private static final float DefaultShrinkFactor = 2; + + private static final boolean DefaultAutoShrink = false; + + public static Builder newBuilder() { + return new Builder<>(); + } + + /** + * Builder of ConcurrentLongHashMap. + */ + public static class Builder { + int expectedItems = DefaultExpectedItems; + int concurrencyLevel = DefaultConcurrencyLevel; + float mapFillFactor = DefaultMapFillFactor; + float mapIdleFactor = DefaultMapIdleFactor; + float expandFactor = DefaultExpandFactor; + float shrinkFactor = DefaultShrinkFactor; + boolean autoShrink = DefaultAutoShrink; + + public Builder expectedItems(int expectedItems) { + this.expectedItems = expectedItems; + return this; + } + + public Builder concurrencyLevel(int concurrencyLevel) { + this.concurrencyLevel = concurrencyLevel; + return this; + } + + public Builder mapFillFactor(float mapFillFactor) { + this.mapFillFactor = mapFillFactor; + return this; + } + + public Builder mapIdleFactor(float mapIdleFactor) { + this.mapIdleFactor = mapIdleFactor; + return this; + } + + public Builder expandFactor(float expandFactor) { + this.expandFactor = expandFactor; + return this; + } + + public Builder shrinkFactor(float shrinkFactor) { + this.shrinkFactor = shrinkFactor; + return this; + } + + public Builder autoShrink(boolean autoShrink) { + this.autoShrink = autoShrink; + return this; + } + + public ConcurrentLongHashMap build() { + return new ConcurrentLongHashMap<>(expectedItems, concurrencyLevel, + mapFillFactor, mapIdleFactor, autoShrink, expandFactor, shrinkFactor); + } + } + private final Section[] sections; + @Deprecated public ConcurrentLongHashMap() { this(DefaultExpectedItems); } + @Deprecated public ConcurrentLongHashMap(int expectedItems) { this(expectedItems, DefaultConcurrencyLevel); } + @Deprecated public ConcurrentLongHashMap(int expectedItems, int concurrencyLevel) { + this(expectedItems, concurrencyLevel, DefaultMapFillFactor, 
DefaultMapIdleFactor, + DefaultAutoShrink, DefaultExpandFactor, DefaultShrinkFactor); + } + + public ConcurrentLongHashMap(int expectedItems, int concurrencyLevel, + float mapFillFactor, float mapIdleFactor, + boolean autoShrink, float expandFactor, float shrinkFactor) { checkArgument(expectedItems > 0); checkArgument(concurrencyLevel > 0); checkArgument(expectedItems >= concurrencyLevel); + checkArgument(mapFillFactor > 0 && mapFillFactor < 1); + checkArgument(mapIdleFactor > 0 && mapIdleFactor < 1); + checkArgument(mapFillFactor > mapIdleFactor); + checkArgument(expandFactor > 1); + checkArgument(shrinkFactor > 1); int numSections = concurrencyLevel; int perSectionExpectedItems = expectedItems / numSections; - int perSectionCapacity = (int) (perSectionExpectedItems / MapFillFactor); + int perSectionCapacity = (int) (perSectionExpectedItems / mapFillFactor); this.sections = (Section[]) new Section[numSections]; for (int i = 0; i < numSections; i++) { - sections[i] = new Section<>(perSectionCapacity); + sections[i] = new Section<>(perSectionCapacity, mapFillFactor, mapIdleFactor, + autoShrink, expandFactor, shrinkFactor); } } @@ -195,20 +274,35 @@ private static final class Section extends StampedLock { private volatile V[] values; private volatile int capacity; + private final int initCapacity; private static final AtomicIntegerFieldUpdater
SIZE_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Section.class, "size"); private volatile int size; private int usedBuckets; - private int resizeThreshold; - - Section(int capacity) { + private int resizeThresholdUp; + private int resizeThresholdBelow; + private final float mapFillFactor; + private final float mapIdleFactor; + private final float expandFactor; + private final float shrinkFactor; + private final boolean autoShrink; + + Section(int capacity, float mapFillFactor, float mapIdleFactor, boolean autoShrink, + float expandFactor, float shrinkFactor) { this.capacity = alignToPowerOfTwo(capacity); + this.initCapacity = this.capacity; this.keys = new long[this.capacity]; this.values = (V[]) new Object[this.capacity]; this.size = 0; this.usedBuckets = 0; - this.resizeThreshold = (int) (this.capacity * MapFillFactor); + this.autoShrink = autoShrink; + this.mapFillFactor = mapFillFactor; + this.mapIdleFactor = mapIdleFactor; + this.expandFactor = expandFactor; + this.shrinkFactor = shrinkFactor; + this.resizeThresholdUp = (int) (this.capacity * mapFillFactor); + this.resizeThresholdBelow = (int) (this.capacity * mapIdleFactor); } V get(long key, int keyHash) { @@ -322,9 +416,10 @@ V put(long key, V value, int keyHash, boolean onlyIfAbsent, LongFunction valu ++bucket; } } finally { - if (usedBuckets >= resizeThreshold) { + if (usedBuckets > resizeThresholdUp) { try { - rehash(); + int newCapacity = alignToPowerOfTwo((int) (capacity * expandFactor)); + rehash(newCapacity); } finally { unlockWrite(stamp); } @@ -356,6 +451,16 @@ private V remove(long key, Object value, int keyHash) { if (nextValueInArray == EmptyValue) { values[bucket] = (V) EmptyValue; --usedBuckets; + + // Cleanup all the buckets that were in `DeletedValue` state, + // so that we can reduce unnecessary expansions + int lastBucket = signSafeMod(bucket - 1, capacity); + while (values[lastBucket] == DeletedValue) { + values[lastBucket] = (V) EmptyValue; + --usedBuckets; + + lastBucket = 
signSafeMod(lastBucket - 1, capacity); + } } else { values[bucket] = (V) DeletedValue; } @@ -373,7 +478,20 @@ private V remove(long key, Object value, int keyHash) { } } finally { - unlockWrite(stamp); + if (autoShrink && size < resizeThresholdBelow) { + try { + int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor)); + int newResizeThresholdUp = (int) (newCapacity * mapFillFactor); + if (newCapacity < capacity && newResizeThresholdUp > size) { + // shrink the hashmap + rehash(newCapacity); + } + } finally { + unlockWrite(stamp); + } + } else { + unlockWrite(stamp); + } } } @@ -385,6 +503,9 @@ void clear() { Arrays.fill(values, EmptyValue); this.size = 0; this.usedBuckets = 0; + if (autoShrink) { + rehash(initCapacity); + } } finally { unlockWrite(stamp); } @@ -439,9 +560,8 @@ public void forEach(EntryProcessor processor) { } } - private void rehash() { + private void rehash(int newCapacity) { // Expand the hashmap - int newCapacity = capacity * 2; long[] newKeys = new long[newCapacity]; V[] newValues = (V[]) new Object[newCapacity]; @@ -458,7 +578,8 @@ private void rehash() { values = newValues; capacity = newCapacity; usedBuckets = size; - resizeThreshold = (int) (capacity * MapFillFactor); + resizeThresholdUp = (int) (capacity * mapFillFactor); + resizeThresholdBelow = (int) (capacity * mapIdleFactor); } private static void insertKeyValueNoLock(long[] keys, V[] values, long key, V value) { diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java new file mode 100644 index 0000000000000..eac7268ba672d --- /dev/null +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java @@ -0,0 +1,673 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.common.util.collections; + +import static com.google.common.base.Preconditions.checkArgument; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.locks.StampedLock; + +/** + * Concurrent hash map where both keys and values are composed of pairs of longs. + * + *

(long,long) --> (long,long) + * + *

Provides similar methods as a {@code ConcurrentMap} but since it's an open hash map with linear probing, + * no node allocations are required to store the keys and values, and no boxing is required. + * + *

Keys MUST be >= 0. + */ +public class ConcurrentLongLongPairHashMap { + + private static final long EmptyKey = -1L; + private static final long DeletedKey = -2L; + + private static final long ValueNotFound = -1L; + + + private static final int DefaultExpectedItems = 256; + private static final int DefaultConcurrencyLevel = 16; + + private static final float DefaultMapFillFactor = 0.66f; + private static final float DefaultMapIdleFactor = 0.15f; + + private static final float DefaultExpandFactor = 2; + private static final float DefaultShrinkFactor = 2; + + private static final boolean DefaultAutoShrink = false; + + private final Section[] sections; + + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builder of ConcurrentLongLongPairHashMap. + */ + public static class Builder { + int expectedItems = DefaultExpectedItems; + int concurrencyLevel = DefaultConcurrencyLevel; + float mapFillFactor = DefaultMapFillFactor; + float mapIdleFactor = DefaultMapIdleFactor; + float expandFactor = DefaultExpandFactor; + float shrinkFactor = DefaultShrinkFactor; + boolean autoShrink = DefaultAutoShrink; + + public Builder expectedItems(int expectedItems) { + this.expectedItems = expectedItems; + return this; + } + + public Builder concurrencyLevel(int concurrencyLevel) { + this.concurrencyLevel = concurrencyLevel; + return this; + } + + public Builder mapFillFactor(float mapFillFactor) { + this.mapFillFactor = mapFillFactor; + return this; + } + + public Builder mapIdleFactor(float mapIdleFactor) { + this.mapIdleFactor = mapIdleFactor; + return this; + } + + public Builder expandFactor(float expandFactor) { + this.expandFactor = expandFactor; + return this; + } + + public Builder shrinkFactor(float shrinkFactor) { + this.shrinkFactor = shrinkFactor; + return this; + } + + public Builder autoShrink(boolean autoShrink) { + this.autoShrink = autoShrink; + return this; + } + + public ConcurrentLongLongPairHashMap build() { + return new 
ConcurrentLongLongPairHashMap(expectedItems, concurrencyLevel, + mapFillFactor, mapIdleFactor, autoShrink, expandFactor, shrinkFactor); + } + } + + /** + * A BiConsumer Long pair. + */ + public interface BiConsumerLongPair { + void accept(long key1, long key2, long value1, long value2); + } + + /** + * A Long pair function. + */ + public interface LongLongPairFunction { + long apply(long key1, long key2); + } + + /** + * A Long pair predicate. + */ + public interface LongLongPairPredicate { + boolean test(long key1, long key2, long value1, long value2); + } + + private ConcurrentLongLongPairHashMap(int expectedItems, int concurrencyLevel, + float mapFillFactor, float mapIdleFactor, + boolean autoShrink, float expandFactor, float shrinkFactor) { + checkArgument(expectedItems > 0); + checkArgument(concurrencyLevel > 0); + checkArgument(expectedItems >= concurrencyLevel); + checkArgument(mapFillFactor > 0 && mapFillFactor < 1); + checkArgument(mapIdleFactor > 0 && mapIdleFactor < 1); + checkArgument(mapFillFactor > mapIdleFactor); + checkArgument(expandFactor > 1); + checkArgument(shrinkFactor > 1); + + int numSections = concurrencyLevel; + int perSectionExpectedItems = expectedItems / numSections; + int perSectionCapacity = (int) (perSectionExpectedItems / mapFillFactor); + this.sections = new Section[numSections]; + + for (int i = 0; i < numSections; i++) { + sections[i] = new Section(perSectionCapacity, mapFillFactor, mapIdleFactor, + autoShrink, expandFactor, shrinkFactor); + } + } + + public long size() { + long size = 0; + for (Section s : sections) { + size += s.size; + } + return size; + } + + public long capacity() { + long capacity = 0; + for (Section s : sections) { + capacity += s.capacity; + } + return capacity; + } + + public boolean isEmpty() { + for (Section s : sections) { + if (s.size != 0) { + return false; + } + } + + return true; + } + + long getUsedBucketCount() { + long usedBucketCount = 0; + for (Section s : sections) { + usedBucketCount += 
s.usedBuckets; + } + return usedBucketCount; + } + + /** + * @param key1 + * @param key2 + * @return the value or -1 if the key was not present. + */ + public LongPair get(long key1, long key2) { + checkBiggerEqualZero(key1); + long h = hash(key1, key2); + return getSection(h).get(key1, key2, (int) h); + } + + public boolean containsKey(long key1, long key2) { + return get(key1, key2) != null; + } + + public boolean put(long key1, long key2, long value1, long value2) { + checkBiggerEqualZero(key1); + checkBiggerEqualZero(value1); + long h = hash(key1, key2); + return getSection(h).put(key1, key2, value1, value2, (int) h, false); + } + + public boolean putIfAbsent(long key1, long key2, long value1, long value2) { + checkBiggerEqualZero(key1); + checkBiggerEqualZero(value1); + long h = hash(key1, key2); + return getSection(h).put(key1, key2, value1, value2, (int) h, true); + } + + /** + * Remove an existing entry if found. + * + * @param key1 + * @param key2 + * @return the value associated with the key or -1 if key was not present. + */ + public boolean remove(long key1, long key2) { + checkBiggerEqualZero(key1); + long h = hash(key1, key2); + return getSection(h).remove(key1, key2, ValueNotFound, ValueNotFound, (int) h); + } + + public boolean remove(long key1, long key2, long value1, long value2) { + checkBiggerEqualZero(key1); + checkBiggerEqualZero(value1); + long h = hash(key1, key2); + return getSection(h).remove(key1, key2, value1, value2, (int) h); + } + + private Section getSection(long hash) { + // Use 32 msb out of long to get the section + final int sectionIdx = (int) (hash >>> 32) & (sections.length - 1); + return sections[sectionIdx]; + } + + public void clear() { + for (Section s : sections) { + s.clear(); + } + } + + public void forEach(BiConsumerLongPair processor) { + for (Section s : sections) { + s.forEach(processor); + } + } + + /** + * @return a new list of all keys (makes a copy). 
+ */ + public List keys() { + List keys = Lists.newArrayListWithExpectedSize((int) size()); + forEach((key1, key2, value1, value2) -> keys.add(new LongPair(key1, key2))); + return keys; + } + + public List values() { + List values = Lists.newArrayListWithExpectedSize((int) size()); + forEach((key1, key2, value1, value2) -> values.add(new LongPair(value1, value2))); + return values; + } + + public Map asMap() { + Map map = Maps.newHashMapWithExpectedSize((int) size()); + forEach((key1, key2, value1, value2) -> map.put(new LongPair(key1, key2), new LongPair(value1, value2))); + return map; + } + + // A section is a portion of the hash map that is covered by a single + @SuppressWarnings("serial") + private static final class Section extends StampedLock { + // Keys and values are stored interleaved in the table array + private volatile long[] table; + + private volatile int capacity; + private final int initCapacity; + private static final AtomicIntegerFieldUpdater

SIZE_UPDATER = + AtomicIntegerFieldUpdater.newUpdater(Section.class, "size"); + + private volatile int size; + private int usedBuckets; + private int resizeThresholdUp; + private int resizeThresholdBelow; + private final float mapFillFactor; + private final float mapIdleFactor; + private final float expandFactor; + private final float shrinkFactor; + private final boolean autoShrink; + + Section(int capacity, float mapFillFactor, float mapIdleFactor, boolean autoShrink, + float expandFactor, float shrinkFactor) { + this.capacity = alignToPowerOfTwo(capacity); + this.initCapacity = this.capacity; + this.table = new long[4 * this.capacity]; + this.size = 0; + this.usedBuckets = 0; + this.autoShrink = autoShrink; + this.mapFillFactor = mapFillFactor; + this.mapIdleFactor = mapIdleFactor; + this.expandFactor = expandFactor; + this.shrinkFactor = shrinkFactor; + this.resizeThresholdUp = (int) (this.capacity * mapFillFactor); + this.resizeThresholdBelow = (int) (this.capacity * mapIdleFactor); + Arrays.fill(table, EmptyKey); + } + + LongPair get(long key1, long key2, int keyHash) { + long stamp = tryOptimisticRead(); + boolean acquiredLock = false; + int bucket = signSafeMod(keyHash, capacity); + + try { + while (true) { + // First try optimistic locking + long storedKey1 = table[bucket]; + long storedKey2 = table[bucket + 1]; + long storedValue1 = table[bucket + 2]; + long storedValue2 = table[bucket + 3]; + + if (!acquiredLock && validate(stamp)) { + // The values we have read are consistent + if (key1 == storedKey1 && key2 == storedKey2) { + return new LongPair(storedValue1, storedValue2); + } else if (storedKey1 == EmptyKey) { + // Not found + return null; + } + } else { + // Fallback to acquiring read lock + if (!acquiredLock) { + stamp = readLock(); + acquiredLock = true; + + bucket = signSafeMod(keyHash, capacity); + storedKey1 = table[bucket]; + storedKey2 = table[bucket + 1]; + storedValue1 = table[bucket + 2]; + storedValue2 = table[bucket + 3]; + } + + if 
(key1 == storedKey1 && key2 == storedKey2) { + return new LongPair(storedValue1, storedValue2); + } else if (storedKey1 == EmptyKey) { + // Not found + return null; + } + } + + bucket = (bucket + 4) & (table.length - 1); + } + } finally { + if (acquiredLock) { + unlockRead(stamp); + } + } + } + + boolean put(long key1, long key2, long value1, long value2, int keyHash, boolean onlyIfAbsent) { + long stamp = writeLock(); + int bucket = signSafeMod(keyHash, capacity); + + // Remember where we find the first available spot + int firstDeletedKey = -1; + + try { + while (true) { + long storedKey1 = table[bucket]; + long storedKey2 = table[bucket + 1]; + + if (key1 == storedKey1 && key2 == storedKey2) { + if (!onlyIfAbsent) { + // Over written an old value for same key + table[bucket + 2] = value1; + table[bucket + 3] = value2; + return true; + } else { + return false; + } + } else if (storedKey1 == EmptyKey) { + // Found an empty bucket. This means the key is not in the map. If we've already seen a deleted + // key, we should write at that position + if (firstDeletedKey != -1) { + bucket = firstDeletedKey; + } else { + ++usedBuckets; + } + + table[bucket] = key1; + table[bucket + 1] = key2; + table[bucket + 2] = value1; + table[bucket + 3] = value2; + SIZE_UPDATER.incrementAndGet(this); + return true; + } else if (storedKey1 == DeletedKey) { + // The bucket contained a different deleted key + if (firstDeletedKey == -1) { + firstDeletedKey = bucket; + } + } + + bucket = (bucket + 4) & (table.length - 1); + } + } finally { + if (usedBuckets > resizeThresholdUp) { + try { + // Expand the hashmap + int newCapacity = alignToPowerOfTwo((int) (capacity * expandFactor)); + rehash(newCapacity); + } finally { + unlockWrite(stamp); + } + } else { + unlockWrite(stamp); + } + } + } + + private boolean remove(long key1, long key2, long value1, long value2, int keyHash) { + long stamp = writeLock(); + int bucket = signSafeMod(keyHash, capacity); + + try { + while (true) { + long 
storedKey1 = table[bucket]; + long storedKey2 = table[bucket + 1]; + long storedValue1 = table[bucket + 2]; + long storedValue2 = table[bucket + 3]; + if (key1 == storedKey1 && key2 == storedKey2) { + if (value1 == ValueNotFound || (value1 == storedValue1 && value2 == storedValue2)) { + SIZE_UPDATER.decrementAndGet(this); + + cleanBucket(bucket); + return true; + } else { + return false; + } + } else if (storedKey1 == EmptyKey) { + // Key wasn't found + return false; + } + + bucket = (bucket + 4) & (table.length - 1); + } + + } finally { + if (autoShrink && size < resizeThresholdBelow) { + try { + int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor)); + int newResizeThresholdUp = (int) (newCapacity * mapFillFactor); + if (newCapacity < capacity && newResizeThresholdUp > size) { + // shrink the hashmap + rehash(newCapacity); + } + } finally { + unlockWrite(stamp); + } + } else { + unlockWrite(stamp); + } + } + } + + private void cleanBucket(int bucket) { + int nextInArray = (bucket + 4) & (table.length - 1); + if (table[nextInArray] == EmptyKey) { + table[bucket] = EmptyKey; + table[bucket + 1] = EmptyKey; + table[bucket + 2] = ValueNotFound; + table[bucket + 3] = ValueNotFound; + --usedBuckets; + + // Cleanup all the buckets that were in `DeletedKey` state, so that we can reduce unnecessary expansions + bucket = (bucket - 4) & (table.length - 1); + while (table[bucket] == DeletedKey) { + table[bucket] = EmptyKey; + table[bucket + 1] = EmptyKey; + table[bucket + 2] = ValueNotFound; + table[bucket + 3] = ValueNotFound; + --usedBuckets; + + bucket = (bucket - 4) & (table.length - 1); + } + } else { + table[bucket] = DeletedKey; + table[bucket + 1] = DeletedKey; + table[bucket + 2] = ValueNotFound; + table[bucket + 3] = ValueNotFound; + } + } + + void clear() { + long stamp = writeLock(); + + try { + Arrays.fill(table, EmptyKey); + this.size = 0; + this.usedBuckets = 0; + if (autoShrink) { + rehash(initCapacity); + } + } finally { + unlockWrite(stamp); + 
} + } + + public void forEach(BiConsumerLongPair processor) { + long stamp = tryOptimisticRead(); + + long[] table = this.table; + boolean acquiredReadLock = false; + + try { + + // Validate no rehashing + if (!validate(stamp)) { + // Fallback to read lock + stamp = readLock(); + acquiredReadLock = true; + table = this.table; + } + + // Go through all the buckets for this section + for (int bucket = 0; bucket < table.length; bucket += 4) { + long storedKey1 = table[bucket]; + long storedKey2 = table[bucket + 1]; + long storedValue1 = table[bucket + 2]; + long storedValue2 = table[bucket + 3]; + + if (!acquiredReadLock && !validate(stamp)) { + // Fallback to acquiring read lock + stamp = readLock(); + acquiredReadLock = true; + + storedKey1 = table[bucket]; + storedKey2 = table[bucket + 1]; + storedValue1 = table[bucket + 2]; + storedValue2 = table[bucket + 3]; + } + + if (storedKey1 != DeletedKey && storedKey1 != EmptyKey) { + processor.accept(storedKey1, storedKey2, storedValue1, storedValue2); + } + } + } finally { + if (acquiredReadLock) { + unlockRead(stamp); + } + } + } + + private void rehash(int newCapacity) { + long[] newTable = new long[4 * newCapacity]; + Arrays.fill(newTable, EmptyKey); + + // Re-hash table + for (int i = 0; i < table.length; i += 4) { + long storedKey1 = table[i]; + long storedKey2 = table[i + 1]; + long storedValue1 = table[i + 2]; + long storedValue2 = table[i + 3]; + if (storedKey1 != EmptyKey && storedKey1 != DeletedKey) { + insertKeyValueNoLock(newTable, newCapacity, storedKey1, storedKey2, storedValue1, storedValue2); + } + } + + table = newTable; + usedBuckets = size; + // Capacity needs to be updated after the values, so that we won't see + // a capacity value bigger than the actual array size + capacity = newCapacity; + resizeThresholdUp = (int) (capacity * mapFillFactor); + resizeThresholdBelow = (int) (capacity * mapIdleFactor); + } + + private static void insertKeyValueNoLock(long[] table, int capacity, long key1, long key2, 
long value1, + long value2) { + int bucket = signSafeMod(hash(key1, key2), capacity); + + while (true) { + long storedKey1 = table[bucket]; + + if (storedKey1 == EmptyKey) { + // The bucket is empty, so we can use it + table[bucket] = key1; + table[bucket + 1] = key2; + table[bucket + 2] = value1; + table[bucket + 3] = value2; + return; + } + + bucket = (bucket + 4) & (table.length - 1); + } + } + } + + private static final long HashMixer = 0xc6a4a7935bd1e995L; + private static final int R = 47; + + static final long hash(long key1, long key2) { + long hash = key1 * HashMixer; + hash ^= hash >>> R; + hash *= HashMixer; + hash += 31 + (key2 * HashMixer); + hash ^= hash >>> R; + hash *= HashMixer; + return hash; + } + + static final int signSafeMod(long n, int max) { + return (int) (n & (max - 1)) << 2; + } + + private static int alignToPowerOfTwo(int n) { + return (int) Math.pow(2, 32 - Integer.numberOfLeadingZeros(n - 1)); + } + + private static void checkBiggerEqualZero(long n) { + if (n < 0L) { + throw new IllegalArgumentException("Keys and values must be >= 0"); + } + } + + /** + * A pair of long values. 
+ */ + public static class LongPair implements Comparable { + public final long first; + public final long second; + + public LongPair(long first, long second) { + this.first = first; + this.second = second; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof LongPair) { + LongPair other = (LongPair) obj; + return first == other.first && second == other.second; + } + return false; + } + + @Override + public int hashCode() { + return (int) hash(first, second); + } + + @Override + public int compareTo(LongPair o) { + if (first != o.first) { + return Long.compare(first, o.first); + } else { + return Long.compare(second, o.second); + } + } + } +} diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java index f1806c511e2ee..7b5e75813fa78 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java @@ -45,8 +45,74 @@ public class ConcurrentLongPairSet implements LongPairSet { private static final int DefaultExpectedItems = 256; private static final int DefaultConcurrencyLevel = 16; + private static final float DefaultMapFillFactor = 0.66f; + private static final float DefaultMapIdleFactor = 0.15f; + + private static final float DefaultExpandFactor = 2; + private static final float DefaultShrinkFactor = 2; + + private static final boolean DefaultAutoShrink = false; + private final Section[] sections; + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builder of ConcurrentLongPairSet. 
+ */ + public static class Builder { + int expectedItems = DefaultExpectedItems; + int concurrencyLevel = DefaultConcurrencyLevel; + float mapFillFactor = DefaultMapFillFactor; + float mapIdleFactor = DefaultMapIdleFactor; + float expandFactor = DefaultExpandFactor; + float shrinkFactor = DefaultShrinkFactor; + boolean autoShrink = DefaultAutoShrink; + + public Builder expectedItems(int expectedItems) { + this.expectedItems = expectedItems; + return this; + } + + public Builder concurrencyLevel(int concurrencyLevel) { + this.concurrencyLevel = concurrencyLevel; + return this; + } + + public Builder mapFillFactor(float mapFillFactor) { + this.mapFillFactor = mapFillFactor; + return this; + } + + public Builder mapIdleFactor(float mapIdleFactor) { + this.mapIdleFactor = mapIdleFactor; + return this; + } + + public Builder expandFactor(float expandFactor) { + this.expandFactor = expandFactor; + return this; + } + + public Builder shrinkFactor(float shrinkFactor) { + this.shrinkFactor = shrinkFactor; + return this; + } + + public Builder autoShrink(boolean autoShrink) { + this.autoShrink = autoShrink; + return this; + } + + public ConcurrentLongPairSet build() { + return new ConcurrentLongPairSet(expectedItems, concurrencyLevel, + mapFillFactor, mapIdleFactor, autoShrink, expandFactor, shrinkFactor); + } + } + + /** * Represents a function that accepts an object of the {@code LongPair} type. 
*/ @@ -61,18 +127,33 @@ public interface LongPairConsumer { void accept(long v1, long v2); } + @Deprecated public ConcurrentLongPairSet() { this(DefaultExpectedItems); } + @Deprecated public ConcurrentLongPairSet(int expectedItems) { this(expectedItems, DefaultConcurrencyLevel); } + @Deprecated public ConcurrentLongPairSet(int expectedItems, int concurrencyLevel) { + this(expectedItems, concurrencyLevel, DefaultMapFillFactor, DefaultMapIdleFactor, + DefaultAutoShrink, DefaultExpandFactor, DefaultShrinkFactor); + } + + public ConcurrentLongPairSet(int expectedItems, int concurrencyLevel, + float mapFillFactor, float mapIdleFactor, + boolean autoShrink, float expandFactor, float shrinkFactor) { checkArgument(expectedItems > 0); checkArgument(concurrencyLevel > 0); checkArgument(expectedItems >= concurrencyLevel); + checkArgument(mapFillFactor > 0 && mapFillFactor < 1); + checkArgument(mapIdleFactor > 0 && mapIdleFactor < 1); + checkArgument(mapFillFactor > mapIdleFactor); + checkArgument(expandFactor > 1); + checkArgument(shrinkFactor > 1); int numSections = concurrencyLevel; int perSectionExpectedItems = expectedItems / numSections; @@ -80,10 +161,12 @@ public ConcurrentLongPairSet(int expectedItems, int concurrencyLevel) { this.sections = new Section[numSections]; for (int i = 0; i < numSections; i++) { - sections[i] = new Section(perSectionCapacity); + sections[i] = new Section(perSectionCapacity, mapFillFactor, mapIdleFactor, + autoShrink, expandFactor, shrinkFactor); } } + @Override public long size() { long size = 0; for (int i = 0; i < sections.length; i++) { @@ -92,6 +175,7 @@ public long size() { return size; } + @Override public long capacity() { long capacity = 0; for (int i = 0; i < sections.length; i++) { @@ -214,18 +298,33 @@ private static final class Section extends StampedLock { private volatile long[] table; private volatile int capacity; + private final int initCapacity; private static final AtomicIntegerFieldUpdater
SIZE_UPDATER = AtomicIntegerFieldUpdater .newUpdater(Section.class, "size"); private volatile int size; private int usedBuckets; - private int resizeThreshold; - - Section(int capacity) { + private int resizeThresholdUp; + private int resizeThresholdBelow; + private final float mapFillFactor; + private final float mapIdleFactor; + private final float expandFactor; + private final float shrinkFactor; + private final boolean autoShrink; + + Section(int capacity, float mapFillFactor, float mapIdleFactor, boolean autoShrink, + float expandFactor, float shrinkFactor) { this.capacity = alignToPowerOfTwo(capacity); + this.initCapacity = this.capacity; this.table = new long[2 * this.capacity]; this.size = 0; this.usedBuckets = 0; - this.resizeThreshold = (int) (this.capacity * SetFillFactor); + this.autoShrink = autoShrink; + this.mapFillFactor = mapFillFactor; + this.mapIdleFactor = mapIdleFactor; + this.expandFactor = expandFactor; + this.shrinkFactor = shrinkFactor; + this.resizeThresholdUp = (int) (this.capacity * mapFillFactor); + this.resizeThresholdBelow = (int) (this.capacity * mapIdleFactor); Arrays.fill(table, EmptyItem); } @@ -314,9 +413,11 @@ boolean add(long item1, long item2, long hash) { bucket = (bucket + 2) & (table.length - 1); } } finally { - if (usedBuckets > resizeThreshold) { + if (usedBuckets > resizeThresholdUp) { try { - rehash(); + // Expand the hashmap + int newCapacity = alignToPowerOfTwo((int) (capacity * expandFactor)); + rehash(newCapacity); } finally { unlockWrite(stamp); } @@ -347,7 +448,7 @@ private boolean remove(long item1, long item2, int hash) { bucket = (bucket + 2) & (table.length - 1); } } finally { - unlockWrite(stamp); + tryShrinkThenUnlock(stamp); } } @@ -356,29 +457,59 @@ private int removeIf(LongPairPredicate filter) { int removedItems = 0; // Go through all the buckets for this section - for (int bucket = 0; bucket < table.length; bucket += 2) { - long storedItem1 = table[bucket]; - long storedItem2 = table[bucket + 1]; - - if 
(storedItem1 != DeletedItem && storedItem1 != EmptyItem) { - if (filter.test(storedItem1, storedItem2)) { - long h = hash(storedItem1, storedItem2); - if (remove(storedItem1, storedItem2, (int) h)) { + long stamp = writeLock(); + try { + for (int bucket = 0; bucket < table.length; bucket += 2) { + long storedItem1 = table[bucket]; + long storedItem2 = table[bucket + 1]; + if (storedItem1 != DeletedItem && storedItem1 != EmptyItem) { + if (filter.test(storedItem1, storedItem2)) { + SIZE_UPDATER.decrementAndGet(this); + cleanBucket(bucket); removedItems++; } } } + } finally { + tryShrinkThenUnlock(stamp); } - return removedItems; } + private void tryShrinkThenUnlock(long stamp) { + if (autoShrink && size < resizeThresholdBelow) { + try { + int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor)); + int newResizeThresholdUp = (int) (newCapacity * mapFillFactor); + if (newCapacity < capacity && newResizeThresholdUp > size) { + // shrink the hashmap + rehash(newCapacity); + } + } finally { + unlockWrite(stamp); + } + } else { + unlockWrite(stamp); + } + } + private void cleanBucket(int bucket) { int nextInArray = (bucket + 2) & (table.length - 1); if (table[nextInArray] == EmptyItem) { table[bucket] = EmptyItem; table[bucket + 1] = EmptyItem; --usedBuckets; + + // Cleanup all the buckets that were in `DeletedItem` state, + // so that we can reduce unnecessary expansions + int lastBucket = (bucket - 2) & (table.length - 1); + while (table[lastBucket] == DeletedItem) { + table[lastBucket] = EmptyItem; + table[lastBucket + 1] = EmptyItem; + --usedBuckets; + + lastBucket = (lastBucket - 2) & (table.length - 1); + } } else { table[bucket] = DeletedItem; table[bucket + 1] = DeletedItem; @@ -392,6 +523,9 @@ void clear() { Arrays.fill(table, EmptyItem); this.size = 0; this.usedBuckets = 0; + if (autoShrink) { + rehash(initCapacity); + } } finally { unlockWrite(stamp); } @@ -431,9 +565,8 @@ public void forEach(LongPairConsumer processor) { } } - private void rehash() 
{ + private void rehash(int newCapacity) { // Expand the hashmap - int newCapacity = capacity * 2; long[] newTable = new long[2 * newCapacity]; Arrays.fill(newTable, EmptyItem); @@ -451,7 +584,8 @@ private void rehash() { // Capacity needs to be updated after the values, so that we won't see // a capacity value bigger than the actual array size capacity = newCapacity; - resizeThreshold = (int) (capacity * SetFillFactor); + resizeThresholdUp = (int) (capacity * mapFillFactor); + resizeThresholdBelow = (int) (capacity * mapIdleFactor); } private static void insertKeyValueNoLock(long[] table, int capacity, long item1, long item2) { diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java index 47927a98eeaf3..f82bf11a90e13 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java @@ -20,6 +20,7 @@ import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -42,36 +43,144 @@ public class ConcurrentOpenHashMap { private static final Object EmptyKey = null; private static final Object DeletedKey = new Object(); - private static final float MapFillFactor = 0.66f; + /** + * This object is used to delete empty value in this map. + * EmptyValue.equals(null) = true. 
+ */ + private static final Object EmptyValue = new Object() { + + @SuppressFBWarnings + @Override + public boolean equals(Object obj) { + return obj == null; + } + + /** + * This is just for avoiding spotbugs errors + */ + @Override + public int hashCode() { + return super.hashCode(); + } + }; private static final int DefaultExpectedItems = 256; private static final int DefaultConcurrencyLevel = 16; + private static final float DefaultMapFillFactor = 0.66f; + private static final float DefaultMapIdleFactor = 0.15f; + + private static final float DefaultExpandFactor = 2; + private static final float DefaultShrinkFactor = 2; + + private static final boolean DefaultAutoShrink = false; + private final Section[] sections; + public static Builder newBuilder() { + return new Builder<>(); + } + + /** + * Builder of ConcurrentOpenHashMap. + */ + public static class Builder { + int expectedItems = DefaultExpectedItems; + int concurrencyLevel = DefaultConcurrencyLevel; + float mapFillFactor = DefaultMapFillFactor; + float mapIdleFactor = DefaultMapIdleFactor; + float expandFactor = DefaultExpandFactor; + float shrinkFactor = DefaultShrinkFactor; + boolean autoShrink = DefaultAutoShrink; + + public Builder expectedItems(int expectedItems) { + this.expectedItems = expectedItems; + return this; + } + + public Builder concurrencyLevel(int concurrencyLevel) { + this.concurrencyLevel = concurrencyLevel; + return this; + } + + public Builder mapFillFactor(float mapFillFactor) { + this.mapFillFactor = mapFillFactor; + return this; + } + + public Builder mapIdleFactor(float mapIdleFactor) { + this.mapIdleFactor = mapIdleFactor; + return this; + } + + public Builder expandFactor(float expandFactor) { + this.expandFactor = expandFactor; + return this; + } + + public Builder shrinkFactor(float shrinkFactor) { + this.shrinkFactor = shrinkFactor; + return this; + } + + public Builder autoShrink(boolean autoShrink) { + this.autoShrink = autoShrink; + return this; + } + + public 
ConcurrentOpenHashMap build() { + return new ConcurrentOpenHashMap<>(expectedItems, concurrencyLevel, + mapFillFactor, mapIdleFactor, autoShrink, expandFactor, shrinkFactor); + } + } + + @Deprecated public ConcurrentOpenHashMap() { this(DefaultExpectedItems); } + @Deprecated public ConcurrentOpenHashMap(int expectedItems) { this(expectedItems, DefaultConcurrencyLevel); } + @Deprecated public ConcurrentOpenHashMap(int expectedItems, int concurrencyLevel) { + this(expectedItems, concurrencyLevel, DefaultMapFillFactor, DefaultMapIdleFactor, + DefaultAutoShrink, DefaultExpandFactor, DefaultShrinkFactor); + } + + public ConcurrentOpenHashMap(int expectedItems, int concurrencyLevel, + float mapFillFactor, float mapIdleFactor, + boolean autoShrink, float expandFactor, float shrinkFactor) { checkArgument(expectedItems > 0); checkArgument(concurrencyLevel > 0); checkArgument(expectedItems >= concurrencyLevel); + checkArgument(mapFillFactor > 0 && mapFillFactor < 1); + checkArgument(mapIdleFactor > 0 && mapIdleFactor < 1); + checkArgument(mapFillFactor > mapIdleFactor); + checkArgument(expandFactor > 1); + checkArgument(shrinkFactor > 1); int numSections = concurrencyLevel; int perSectionExpectedItems = expectedItems / numSections; - int perSectionCapacity = (int) (perSectionExpectedItems / MapFillFactor); + int perSectionCapacity = (int) (perSectionExpectedItems / mapFillFactor); this.sections = (Section[]) new Section[numSections]; for (int i = 0; i < numSections; i++) { - sections[i] = new Section<>(perSectionCapacity); + sections[i] = new Section<>(perSectionCapacity, mapFillFactor, mapIdleFactor, + autoShrink, expandFactor, shrinkFactor); } } + long getUsedBucketCount() { + long usedBucketCount = 0; + for (Section s : sections) { + usedBucketCount += s.usedBuckets; + } + return usedBucketCount; + } + public long size() { long size = 0; for (Section s : sections) { @@ -142,6 +251,10 @@ public boolean remove(K key, Object value) { return getSection(h).remove(key, value, 
(int) h) != null; } + public void removeNullValue(K key) { + remove(key, EmptyValue); + } + private Section getSection(long hash) { // Use 32 msb out of long to get the section final int sectionIdx = (int) (hash >>> 32) & (sections.length - 1); @@ -182,18 +295,33 @@ private static final class Section extends StampedLock { private volatile Object[] table; private volatile int capacity; + private final int initCapacity; private static final AtomicIntegerFieldUpdater
SIZE_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Section.class, "size"); private volatile int size; private int usedBuckets; - private int resizeThreshold; - - Section(int capacity) { + private int resizeThresholdUp; + private int resizeThresholdBelow; + private final float mapFillFactor; + private final float mapIdleFactor; + private final float expandFactor; + private final float shrinkFactor; + private final boolean autoShrink; + + Section(int capacity, float mapFillFactor, float mapIdleFactor, boolean autoShrink, + float expandFactor, float shrinkFactor) { this.capacity = alignToPowerOfTwo(capacity); + this.initCapacity = this.capacity; this.table = new Object[2 * this.capacity]; this.size = 0; this.usedBuckets = 0; - this.resizeThreshold = (int) (this.capacity * MapFillFactor); + this.autoShrink = autoShrink; + this.mapFillFactor = mapFillFactor; + this.mapIdleFactor = mapIdleFactor; + this.expandFactor = expandFactor; + this.shrinkFactor = shrinkFactor; + this.resizeThresholdUp = (int) (this.capacity * mapFillFactor); + this.resizeThresholdBelow = (int) (this.capacity * mapIdleFactor); } V get(K key, int keyHash) { @@ -290,9 +418,11 @@ V put(K key, V value, int keyHash, boolean onlyIfAbsent, Function valuePro bucket = (bucket + 2) & (table.length - 1); } } finally { - if (usedBuckets > resizeThreshold) { + if (usedBuckets > resizeThresholdUp) { try { - rehash(); + // Expand the hashmap + int newCapacity = alignToPowerOfTwo((int) (capacity * expandFactor)); + rehash(newCapacity); } finally { unlockWrite(stamp); } @@ -319,6 +449,17 @@ private V remove(K key, Object value, int keyHash) { table[bucket] = EmptyKey; table[bucket + 1] = null; --usedBuckets; + + // Cleanup all the buckets that were in `DeletedKey` state, + // so that we can reduce unnecessary expansions + int lastBucket = (bucket - 2) & (table.length - 1); + while (table[lastBucket] == DeletedKey) { + table[lastBucket] = EmptyKey; + table[lastBucket + 1] = null; + --usedBuckets; + + lastBucket = 
(lastBucket - 2) & (table.length - 1); + } } else { table[bucket] = DeletedKey; table[bucket + 1] = null; @@ -337,7 +478,20 @@ private V remove(K key, Object value, int keyHash) { } } finally { - unlockWrite(stamp); + if (autoShrink && size < resizeThresholdBelow) { + try { + int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor)); + int newResizeThresholdUp = (int) (newCapacity * mapFillFactor); + if (newCapacity < capacity && newResizeThresholdUp > size) { + // shrink the hashmap + rehash(newCapacity); + } + } finally { + unlockWrite(stamp); + } + } else { + unlockWrite(stamp); + } } } @@ -348,6 +502,9 @@ void clear() { Arrays.fill(table, EmptyKey); this.size = 0; this.usedBuckets = 0; + if (autoShrink) { + rehash(initCapacity); + } } finally { unlockWrite(stamp); } @@ -389,9 +546,8 @@ public void forEach(BiConsumer processor) { } } - private void rehash() { + private void rehash(int newCapacity) { // Expand the hashmap - int newCapacity = capacity * 2; Object[] newTable = new Object[2 * newCapacity]; // Re-hash table @@ -406,7 +562,8 @@ private void rehash() { table = newTable; capacity = newCapacity; usedBuckets = size; - resizeThreshold = (int) (capacity * MapFillFactor); + resizeThresholdUp = (int) (capacity * mapFillFactor); + resizeThresholdBelow = (int) (capacity * mapIdleFactor); } private static void insertKeyValueNoLock(Object[] table, int capacity, K key, V value) { diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java index 8b77d9052b3bb..cf5ed7ccdc8d9 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java @@ -43,36 +43,123 @@ public class ConcurrentOpenHashSet { private static final Object EmptyValue = null; private static final 
Object DeletedValue = new Object(); - private static final float MapFillFactor = 0.66f; - private static final int DefaultExpectedItems = 256; private static final int DefaultConcurrencyLevel = 16; + private static final float DefaultMapFillFactor = 0.66f; + private static final float DefaultMapIdleFactor = 0.15f; + + private static final float DefaultExpandFactor = 2; + private static final float DefaultShrinkFactor = 2; + + private static final boolean DefaultAutoShrink = false; + private final Section[] sections; + public static Builder newBuilder() { + return new Builder<>(); + } + + /** + * Builder of ConcurrentOpenHashSet. + */ + public static class Builder { + int expectedItems = DefaultExpectedItems; + int concurrencyLevel = DefaultConcurrencyLevel; + float mapFillFactor = DefaultMapFillFactor; + float mapIdleFactor = DefaultMapIdleFactor; + float expandFactor = DefaultExpandFactor; + float shrinkFactor = DefaultShrinkFactor; + boolean autoShrink = DefaultAutoShrink; + + public Builder expectedItems(int expectedItems) { + this.expectedItems = expectedItems; + return this; + } + + public Builder concurrencyLevel(int concurrencyLevel) { + this.concurrencyLevel = concurrencyLevel; + return this; + } + + public Builder mapFillFactor(float mapFillFactor) { + this.mapFillFactor = mapFillFactor; + return this; + } + + public Builder mapIdleFactor(float mapIdleFactor) { + this.mapIdleFactor = mapIdleFactor; + return this; + } + + public Builder expandFactor(float expandFactor) { + this.expandFactor = expandFactor; + return this; + } + + public Builder shrinkFactor(float shrinkFactor) { + this.shrinkFactor = shrinkFactor; + return this; + } + + public Builder autoShrink(boolean autoShrink) { + this.autoShrink = autoShrink; + return this; + } + + public ConcurrentOpenHashSet build() { + return new ConcurrentOpenHashSet<>(expectedItems, concurrencyLevel, + mapFillFactor, mapIdleFactor, autoShrink, expandFactor, shrinkFactor); + } + } + + @Deprecated public 
ConcurrentOpenHashSet() { this(DefaultExpectedItems); } + @Deprecated public ConcurrentOpenHashSet(int expectedItems) { this(expectedItems, DefaultConcurrencyLevel); } + @Deprecated public ConcurrentOpenHashSet(int expectedItems, int concurrencyLevel) { + this(expectedItems, concurrencyLevel, DefaultMapFillFactor, DefaultMapIdleFactor, + DefaultAutoShrink, DefaultExpandFactor, DefaultShrinkFactor); + } + + public ConcurrentOpenHashSet(int expectedItems, int concurrencyLevel, + float mapFillFactor, float mapIdleFactor, + boolean autoShrink, float expandFactor, float shrinkFactor) { checkArgument(expectedItems > 0); checkArgument(concurrencyLevel > 0); checkArgument(expectedItems >= concurrencyLevel); + checkArgument(mapFillFactor > 0 && mapFillFactor < 1); + checkArgument(mapIdleFactor > 0 && mapIdleFactor < 1); + checkArgument(mapFillFactor > mapIdleFactor); + checkArgument(expandFactor > 1); + checkArgument(shrinkFactor > 1); int numSections = concurrencyLevel; int perSectionExpectedItems = expectedItems / numSections; - int perSectionCapacity = (int) (perSectionExpectedItems / MapFillFactor); + int perSectionCapacity = (int) (perSectionExpectedItems / mapFillFactor); this.sections = (Section[]) new Section[numSections]; for (int i = 0; i < numSections; i++) { - sections[i] = new Section<>(perSectionCapacity); + sections[i] = new Section<>(perSectionCapacity, mapFillFactor, mapIdleFactor, + autoShrink, expandFactor, shrinkFactor); } } + long getUsedBucketCount() { + long usedBucketCount = 0; + for (Section s : sections) { + usedBucketCount += s.usedBuckets; + } + return usedBucketCount; + } + public long size() { long size = 0; for (int i = 0; i < sections.length; i++) { @@ -177,18 +264,33 @@ private static final class Section extends StampedLock { private volatile V[] values; private volatile int capacity; + private final int initCapacity; private static final AtomicIntegerFieldUpdater
SIZE_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Section.class, "size"); private volatile int size; private int usedBuckets; - private int resizeThreshold; - - Section(int capacity) { + private int resizeThresholdUp; + private int resizeThresholdBelow; + private final float mapFillFactor; + private final float mapIdleFactor; + private final float expandFactor; + private final float shrinkFactor; + private final boolean autoShrink; + + Section(int capacity, float mapFillFactor, float mapIdleFactor, boolean autoShrink, + float expandFactor, float shrinkFactor) { this.capacity = alignToPowerOfTwo(capacity); + this.initCapacity = this.capacity; this.values = (V[]) new Object[this.capacity]; this.size = 0; this.usedBuckets = 0; - this.resizeThreshold = (int) (this.capacity * MapFillFactor); + this.autoShrink = autoShrink; + this.mapFillFactor = mapFillFactor; + this.mapIdleFactor = mapIdleFactor; + this.expandFactor = expandFactor; + this.shrinkFactor = shrinkFactor; + this.resizeThresholdUp = (int) (this.capacity * mapFillFactor); + this.resizeThresholdBelow = (int) (this.capacity * mapIdleFactor); } boolean contains(V value, int keyHash) { @@ -284,9 +386,11 @@ boolean add(V value, int keyHash) { ++bucket; } } finally { - if (usedBuckets > resizeThreshold) { + if (usedBuckets > resizeThresholdUp) { try { - rehash(); + // Expand the hashmap + int newCapacity = alignToPowerOfTwo((int) (capacity * expandFactor)); + rehash(newCapacity); } finally { unlockWrite(stamp); } @@ -319,7 +423,20 @@ private boolean remove(V value, int keyHash) { } } finally { - unlockWrite(stamp); + if (autoShrink && size < resizeThresholdBelow) { + try { + int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor)); + int newResizeThresholdUp = (int) (newCapacity * mapFillFactor); + if (newCapacity < capacity && newResizeThresholdUp > size) { + // shrink the hashmap + rehash(newCapacity); + } + } finally { + unlockWrite(stamp); + } + } else { + unlockWrite(stamp); + } } } @@ -330,6 
+447,9 @@ void clear() { Arrays.fill(values, EmptyValue); this.size = 0; this.usedBuckets = 0; + if (autoShrink) { + rehash(initCapacity); + } } finally { unlockWrite(stamp); } @@ -365,6 +485,16 @@ private void cleanBucket(int bucket) { if (values[nextInArray] == EmptyValue) { values[bucket] = (V) EmptyValue; --usedBuckets; + + // Cleanup all the buckets that were in `DeletedValue` state, + // so that we can reduce unnecessary expansions + int lastBucket = signSafeMod(bucket - 1, capacity); + while (values[lastBucket] == DeletedValue) { + values[lastBucket] = (V) EmptyValue; + --usedBuckets; + + lastBucket = signSafeMod(lastBucket - 1, capacity); + } } else { values[bucket] = (V) DeletedValue; } @@ -402,9 +532,8 @@ public void forEach(Consumer processor) { } } - private void rehash() { + private void rehash(int newCapacity) { // Expand the hashmap - int newCapacity = capacity * 2; V[] newValues = (V[]) new Object[newCapacity]; // Re-hash table @@ -418,7 +547,8 @@ private void rehash() { values = newValues; capacity = newCapacity; usedBuckets = size; - resizeThreshold = (int) (capacity * MapFillFactor); + resizeThresholdUp = (int) (capacity * mapFillFactor); + resizeThresholdBelow = (int) (capacity * mapIdleFactor); } private static void insertValueNoLock(V[] values, V value) { diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenLongPairRangeSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenLongPairRangeSet.java index 174f318aa1b61..08a63141f4cca 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenLongPairRangeSet.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenLongPairRangeSet.java @@ -29,6 +29,7 @@ import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import 
org.apache.commons.lang3.mutable.MutableInt; /** * A Concurrent set comprising zero or more ranges of type {@link LongPair}. This can be alternative of @@ -152,14 +153,12 @@ public boolean isEmpty() { if (rangeBitSetMap.isEmpty()) { return true; } - AtomicBoolean isEmpty = new AtomicBoolean(false); - rangeBitSetMap.forEach((key, val) -> { - if (!isEmpty.get()) { - return; + for (BitSet rangeBitSet : rangeBitSetMap.values()) { + if (!rangeBitSet.isEmpty()) { + return false; } - isEmpty.set(val.isEmpty()); - }); - return isEmpty.get(); + } + return true; } @Override @@ -244,6 +243,29 @@ public Range lastRange() { return Range.openClosed(consumer.apply(lastSet.getKey(), lower), consumer.apply(lastSet.getKey(), upper)); } + @Override + public int cardinality(long lowerKey, long lowerValue, long upperKey, long upperValue) { + NavigableMap subMap = rangeBitSetMap.subMap(lowerKey, true, upperKey, true); + MutableInt v = new MutableInt(0); + subMap.forEach((key, bitset) -> { + if (key == lowerKey || key == upperKey) { + BitSet temp = (BitSet) bitset.clone(); + // Trim the bitset index which < lowerValue + if (key == lowerKey) { + temp.clear(0, (int) Math.max(0, lowerValue)); + } + // Trim the bitset index which > upperValue + if (key == upperKey) { + temp.clear((int) Math.min(upperValue + 1, temp.length()), temp.length()); + } + v.add(temp.cardinality()); + } else { + v.add(bitset.cardinality()); + } + }); + return v.intValue(); + } + @Override public int size() { if (updatedAfterCachedForSize) { diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSet.java index d3321f9ad35ac..06efd0490d184 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSet.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSet.java @@ 
-48,14 +48,15 @@ public class ConcurrentSortedLongPairSet implements LongPairSet { protected final NavigableMap longPairSets = new ConcurrentSkipListMap<>(); - private int expectedItems; - private int concurrencyLevel; + private final int expectedItems; + private final int concurrencyLevel; /** * If {@link #longPairSets} adds and removes the item-set frequently then it allocates and removes * {@link ConcurrentLongPairSet} for the same item multiple times which can lead to gc-puases. To avoid such * situation, avoid removing empty LogPairSet until it reaches max limit. */ - private int maxAllowedSetOnRemove; + private final int maxAllowedSetOnRemove; + private final boolean autoShrink; private static final int DEFAULT_MAX_ALLOWED_SET_ON_REMOVE = 10; public ConcurrentSortedLongPairSet() { @@ -70,16 +71,30 @@ public ConcurrentSortedLongPairSet(int expectedItems, int concurrencyLevel) { this(expectedItems, concurrencyLevel, DEFAULT_MAX_ALLOWED_SET_ON_REMOVE); } + public ConcurrentSortedLongPairSet(int expectedItems, int concurrencyLevel, boolean autoShrink) { + this(expectedItems, concurrencyLevel, DEFAULT_MAX_ALLOWED_SET_ON_REMOVE, autoShrink); + } + public ConcurrentSortedLongPairSet(int expectedItems, int concurrencyLevel, int maxAllowedSetOnRemove) { + this(expectedItems, concurrencyLevel, maxAllowedSetOnRemove, false); + } + + public ConcurrentSortedLongPairSet(int expectedItems, int concurrencyLevel, int maxAllowedSetOnRemove, + boolean autoShrink) { this.expectedItems = expectedItems; this.concurrencyLevel = concurrencyLevel; this.maxAllowedSetOnRemove = maxAllowedSetOnRemove; + this.autoShrink = autoShrink; } @Override public boolean add(long item1, long item2) { ConcurrentLongPairSet messagesToReplay = longPairSets.computeIfAbsent(item1, - (key) -> new ConcurrentLongPairSet(expectedItems, concurrencyLevel)); + (key) -> ConcurrentLongPairSet.newBuilder() + .expectedItems(expectedItems) + .concurrencyLevel(concurrencyLevel) + .autoShrink(autoShrink) + .build()); 
return messagesToReplay.add(item1, item2); } @@ -166,13 +181,15 @@ public String toString() { @Override public boolean isEmpty() { - AtomicBoolean isEmpty = new AtomicBoolean(true); - longPairSets.forEach((item1, longPairSet) -> { - if (isEmpty.get() && !longPairSet.isEmpty()) { - isEmpty.set(false); + if (longPairSets.isEmpty()) { + return true; + } + for (ConcurrentLongPairSet subSet : longPairSets.values()) { + if (!subSet.isEmpty()) { + return false; } - }); - return isEmpty.get(); + } + return true; } @Override @@ -189,6 +206,15 @@ public long size() { return size.get(); } + @Override + public long capacity() { + AtomicLong capacity = new AtomicLong(0); + longPairSets.forEach((item1, longPairSet) -> { + capacity.getAndAdd(longPairSet.capacity()); + }); + return capacity.get(); + } + @Override public boolean contains(long item1, long item2) { ConcurrentLongPairSet longPairSet = longPairSets.get(item1); diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/LongPairRangeSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/LongPairRangeSet.java index 15f32252b90a1..c41a1a1f13fff 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/LongPairRangeSet.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/LongPairRangeSet.java @@ -127,6 +127,11 @@ public interface LongPairRangeSet> { */ Range lastRange(); + /** + * Return the number bit sets to true from lower (inclusive) to upper (inclusive). + */ + int cardinality(long lowerKey, long lowerValue, long upperKey, long upperValue); + /** * Represents a function that accepts two long arguments and produces a result. 
* @@ -290,6 +295,11 @@ public Range lastRange() { return list.get(list.size() - 1); } + @Override + public int cardinality(long lowerKey, long lowerValue, long upperKey, long upperValue) { + throw new UnsupportedOperationException(); + } + @Override public int size() { return set.asRanges().size(); diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/LongPairSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/LongPairSet.java index 32de7e4c232bd..f27b994f777d2 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/LongPairSet.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/LongPairSet.java @@ -107,6 +107,13 @@ public interface LongPairPredicate { */ long size(); + /** + * Returns capacity of the set. + * + * @return + */ + long capacity(); + /** * Checks if given (item1,item2) composite value exists into set. * diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/SegmentedLongArray.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/SegmentedLongArray.java new file mode 100644 index 0000000000000..dc4d1c4908c48 --- /dev/null +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/SegmentedLongArray.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.common.util.collections; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.PooledByteBufAllocator; +import java.util.ArrayList; +import java.util.List; +import javax.annotation.concurrent.NotThreadSafe; +import lombok.Getter; + +@NotThreadSafe +public class SegmentedLongArray implements AutoCloseable { + + private static final int SIZE_OF_LONG = 8; + + private static final int MAX_SEGMENT_SIZE = 2 * 1024 * 1024; // 2M longs -> 16 MB + private final List buffers = new ArrayList<>(); + + @Getter + private final long initialCapacity; + + @Getter + private long capacity; + + public SegmentedLongArray(long initialCapacity) { + long remainingToAdd = initialCapacity; + + // Add first segment + int sizeToAdd = (int) Math.min(remainingToAdd, MAX_SEGMENT_SIZE); + ByteBuf buffer = PooledByteBufAllocator.DEFAULT.directBuffer(sizeToAdd * SIZE_OF_LONG); + buffer.writerIndex(sizeToAdd * SIZE_OF_LONG); + buffers.add(buffer); + remainingToAdd -= sizeToAdd; + + // Add the remaining segments, all at full segment size, if necessary + while (remainingToAdd > 0) { + buffer = PooledByteBufAllocator.DEFAULT.directBuffer(MAX_SEGMENT_SIZE * SIZE_OF_LONG); + buffer.writerIndex(MAX_SEGMENT_SIZE * SIZE_OF_LONG); + buffers.add(buffer); + remainingToAdd -= MAX_SEGMENT_SIZE; + } + + this.initialCapacity = initialCapacity; + this.capacity = this.initialCapacity; + } + + public void writeLong(long offset, long value) { + int bufferIdx = (int) (offset / MAX_SEGMENT_SIZE); + int internalIdx = (int) (offset % MAX_SEGMENT_SIZE); + 
buffers.get(bufferIdx).setLong(internalIdx * SIZE_OF_LONG, value); + } + + public long readLong(long offset) { + int bufferIdx = (int) (offset / MAX_SEGMENT_SIZE); + int internalIdx = (int) (offset % MAX_SEGMENT_SIZE); + return buffers.get(bufferIdx).getLong(internalIdx * SIZE_OF_LONG); + } + + public void increaseCapacity() { + if (capacity < MAX_SEGMENT_SIZE) { + // Resize the current buffer to bigger capacity + capacity += (capacity <= 256 ? capacity : capacity / 2); + capacity = Math.min(capacity, MAX_SEGMENT_SIZE); + buffers.get(0).capacity((int) this.capacity * SIZE_OF_LONG); + buffers.get(0).writerIndex((int) this.capacity * SIZE_OF_LONG); + } else { + // Let's add 1 mode buffer to the list + int bufferSize = MAX_SEGMENT_SIZE * SIZE_OF_LONG; + ByteBuf buffer = PooledByteBufAllocator.DEFAULT.directBuffer(bufferSize, bufferSize); + buffer.writerIndex(bufferSize); + buffers.add(buffer); + capacity += MAX_SEGMENT_SIZE; + } + } + + public void shrink(long newCapacity) { + if (newCapacity >= capacity || newCapacity < initialCapacity) { + return; + } + + long sizeToReduce = capacity - newCapacity; + while (sizeToReduce >= MAX_SEGMENT_SIZE && buffers.size() > 1) { + ByteBuf b = buffers.remove(buffers.size() - 1); + b.release(); + capacity -= MAX_SEGMENT_SIZE; + sizeToReduce -= MAX_SEGMENT_SIZE; + } + + if (buffers.size() == 1 && sizeToReduce > 0) { + // We should also reduce the capacity of the first buffer + capacity -= sizeToReduce; + ByteBuf oldBuffer = buffers.get(0); + ByteBuf newBuffer = PooledByteBufAllocator.DEFAULT.directBuffer((int) capacity * SIZE_OF_LONG); + oldBuffer.getBytes(0, newBuffer, (int) capacity * SIZE_OF_LONG); + oldBuffer.release(); + buffers.set(0, newBuffer); + } + } + + @Override + public void close() { + buffers.forEach(ByteBuf::release); + } + + /** + * The amount of memory used to back the array of longs. 
+ */ + public long bytesCapacity() { + return capacity * SIZE_OF_LONG; + } +} diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/TripleLongPriorityQueue.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/TripleLongPriorityQueue.java index 1d8d909beae79..50288247c643c 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/TripleLongPriorityQueue.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/TripleLongPriorityQueue.java @@ -19,8 +19,6 @@ package org.apache.pulsar.common.util.collections; import static com.google.common.base.Preconditions.checkArgument; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.PooledByteBufAllocator; /** * Provides a priority-queue implementation specialized on items composed by 3 longs. @@ -28,19 +26,28 @@ *

This class is not thread safe and the items are stored in direct memory. */ public class TripleLongPriorityQueue implements AutoCloseable { - - private static final int SIZE_OF_LONG = 8; private static final int DEFAULT_INITIAL_CAPACITY = 16; + private static final float DEFAULT_SHRINK_FACTOR = 0.5f; // Each item is composed of 3 longs private static final int ITEMS_COUNT = 3; - private static final int TUPLE_SIZE = ITEMS_COUNT * SIZE_OF_LONG; + /** + * Reserve 10% of the capacity when shrinking to avoid frequent expansion and shrinkage. + */ + private static final float RESERVATION_FACTOR = 0.9f; + + private final SegmentedLongArray array; + + // Count of how many (long,long,long) tuples are currently inserted + private long tuplesCount; - private final ByteBuf buffer; + /** + * When size < capacity * shrinkFactor, may trigger shrinking. + */ + private final float shrinkFactor; - private int capacity; - private int size; + private long shrinkThreshold; /** * Create a new priority queue with default initial capacity. @@ -49,14 +56,21 @@ public TripleLongPriorityQueue() { this(DEFAULT_INITIAL_CAPACITY); } + public TripleLongPriorityQueue(long initialCapacity, float shrinkFactor) { + checkArgument(initialCapacity > 0); + checkArgument(shrinkFactor > 0); + this.array = new SegmentedLongArray(initialCapacity * ITEMS_COUNT); + this.tuplesCount = 0; + this.shrinkThreshold = (long) (initialCapacity * shrinkFactor); + this.shrinkFactor = shrinkFactor; + } + /** * Create a new priority queue with a given initial capacity. 
* @param initialCapacity */ public TripleLongPriorityQueue(int initialCapacity) { - capacity = initialCapacity; - buffer = PooledByteBufAllocator.DEFAULT.directBuffer(initialCapacity * ITEMS_COUNT * SIZE_OF_LONG); - size = 0; + this(initialCapacity, DEFAULT_SHRINK_FACTOR); } /** @@ -64,7 +78,7 @@ public TripleLongPriorityQueue(int initialCapacity) { */ @Override public void close() { - buffer.release(); + array.close(); } /** @@ -75,13 +89,14 @@ public void close() { * @param n3 */ public void add(long n1, long n2, long n3) { - if (size == capacity) { - increaseCapacity(); + long arrayIdx = tuplesCount * ITEMS_COUNT; + if ((arrayIdx + 2) >= array.getCapacity()) { + array.increaseCapacity(); } - put(size, n1, n2, n3); - siftUp(size); - ++size; + put(tuplesCount, n1, n2, n3); + siftUp(tuplesCount); + ++tuplesCount; } /** @@ -90,8 +105,8 @@ public void add(long n1, long n2, long n3) { *

The tuple will not be extracted */ public long peekN1() { - checkArgument(size != 0); - return buffer.getLong(0); + checkArgument(tuplesCount != 0); + return array.readLong(0); } /** @@ -100,8 +115,8 @@ public long peekN1() { *

The tuple will not be extracted */ public long peekN2() { - checkArgument(size != 0); - return buffer.getLong(0 + 1 * SIZE_OF_LONG); + checkArgument(tuplesCount != 0); + return array.readLong(1); } /** @@ -110,123 +125,144 @@ public long peekN2() { *

The tuple will not be extracted */ public long peekN3() { - checkArgument(size != 0); - return buffer.getLong(0 + 2 * SIZE_OF_LONG); + checkArgument(tuplesCount != 0); + return array.readLong(2); } /** * Removes the first item from the queue. */ public void pop() { - checkArgument(size != 0); - swap(0, size - 1); - size--; + checkArgument(tuplesCount != 0); + swap(0, tuplesCount - 1); + tuplesCount--; siftDown(0); + shrinkCapacity(); } /** * Returns whether the priority queue is empty. */ public boolean isEmpty() { - return size == 0; + return tuplesCount == 0; } /** * Returns the number of tuples in the priority queue. */ - public int size() { - return size; + public long size() { + return tuplesCount; + } + + /** + * The amount of memory used to back the priority queue. + */ + public long bytesCapacity() { + return array.bytesCapacity(); } /** * Clear all items. */ public void clear() { - this.buffer.clear(); - this.size = 0; + this.tuplesCount = 0; + shrinkCapacity(); } - private void increaseCapacity() { - // For bigger sizes, increase by 50% - this.capacity += (capacity <= 256 ? 
capacity : capacity / 2); - buffer.capacity(this.capacity * TUPLE_SIZE); + private void shrinkCapacity() { + if (tuplesCount <= shrinkThreshold && array.getCapacity() > array.getInitialCapacity()) { + long sizeToShrink = (long) (array.getCapacity() * shrinkFactor * RESERVATION_FACTOR); + if (sizeToShrink == 0) { + return; + } + + long newCapacity; + if (array.getCapacity() - sizeToShrink <= array.getInitialCapacity()) { + newCapacity = array.getInitialCapacity(); + } else { + newCapacity = array.getCapacity() - sizeToShrink; + } + + array.shrink(newCapacity); + this.shrinkThreshold = (long) (array.getCapacity() / (double) ITEMS_COUNT * shrinkFactor); + } } - private void siftUp(int idx) { - while (idx > 0) { - int parentIdx = (idx - 1) / 2; - if (compare(idx, parentIdx) >= 0) { + private void siftUp(long tupleIdx) { + while (tupleIdx > 0) { + long parentIdx = (tupleIdx - 1) / 2; + if (compare(tupleIdx, parentIdx) >= 0) { break; } - swap(idx, parentIdx); - idx = parentIdx; + swap(tupleIdx, parentIdx); + tupleIdx = parentIdx; } } - private void siftDown(int idx) { - int half = size / 2; - while (idx < half) { - int left = 2 * idx + 1; - int right = 2 * idx + 2; + private void siftDown(long tupleIdx) { + long half = tuplesCount / 2; + while (tupleIdx < half) { + long left = 2 * tupleIdx + 1; + long right = 2 * tupleIdx + 2; - int swapIdx = idx; + long swapIdx = tupleIdx; - if (compare(idx, left) > 0) { + if (compare(tupleIdx, left) > 0) { swapIdx = left; } - if (right < size && compare(swapIdx, right) > 0) { + if (right < tuplesCount && compare(swapIdx, right) > 0) { swapIdx = right; } - if (swapIdx == idx) { + if (swapIdx == tupleIdx) { return; } - swap(idx, swapIdx); - idx = swapIdx; + swap(tupleIdx, swapIdx); + tupleIdx = swapIdx; } } - private void put(int idx, long n1, long n2, long n3) { - int i = idx * TUPLE_SIZE; - buffer.setLong(i, n1); - buffer.setLong(i + 1 * SIZE_OF_LONG, n2); - buffer.setLong(i + 2 * SIZE_OF_LONG, n3); + private void put(long tupleIdx, 
long n1, long n2, long n3) { + long idx = tupleIdx * ITEMS_COUNT; + array.writeLong(idx, n1); + array.writeLong(idx + 1, n2); + array.writeLong(idx + 2, n3); } - private int compare(int idx1, int idx2) { - int i1 = idx1 * TUPLE_SIZE; - int i2 = idx2 * TUPLE_SIZE; + private int compare(long tupleIdx1, long tupleIdx2) { + long idx1 = tupleIdx1 * ITEMS_COUNT; + long idx2 = tupleIdx2 * ITEMS_COUNT; - int c1 = Long.compare(buffer.getLong(i1), buffer.getLong(i2)); + int c1 = Long.compare(array.readLong(idx1), array.readLong(idx2)); if (c1 != 0) { return c1; } - int c2 = Long.compare(buffer.getLong(i1 + SIZE_OF_LONG), buffer.getLong(i2 + SIZE_OF_LONG)); + int c2 = Long.compare(array.readLong(idx1 + 1), array.readLong(idx2 + 1)); if (c2 != 0) { return c2; } - return Long.compare(buffer.getLong(i1 + 2 * SIZE_OF_LONG), buffer.getLong(i2 + 2 * SIZE_OF_LONG)); + return Long.compare(array.readLong(idx1 + 2), array.readLong(idx2 + 2)); } - private void swap(int idx1, int idx2) { - int i1 = idx1 * TUPLE_SIZE; - int i2 = idx2 * TUPLE_SIZE; + private void swap(long tupleIdx1, long tupleIdx2) { + long idx1 = tupleIdx1 * ITEMS_COUNT; + long idx2 = tupleIdx2 * ITEMS_COUNT; - long tmp1 = buffer.getLong(i1); - long tmp2 = buffer.getLong(i1 + 1 * SIZE_OF_LONG); - long tmp3 = buffer.getLong(i1 + 2 * SIZE_OF_LONG); + long tmp1 = array.readLong(idx1); + long tmp2 = array.readLong(idx1 + 1); + long tmp3 = array.readLong(idx1 + 2); - buffer.setLong(i1, buffer.getLong(i2)); - buffer.setLong(i1 + 1 * SIZE_OF_LONG, buffer.getLong(i2 + 1 * SIZE_OF_LONG)); - buffer.setLong(i1 + 2 * SIZE_OF_LONG, buffer.getLong(i2 + 2 * SIZE_OF_LONG)); + array.writeLong(idx1, array.readLong(idx2)); + array.writeLong(idx1 + 1, array.readLong(idx2 + 1)); + array.writeLong(idx1 + 2, array.readLong(idx2 + 2)); - buffer.setLong(i2, tmp1); - buffer.setLong(i2 + 1 * SIZE_OF_LONG, tmp2); - buffer.setLong(i2 + 2 * SIZE_OF_LONG, tmp3); + array.writeLong(idx2, tmp1); + array.writeLong(idx2 + 1, tmp2); + array.writeLong(idx2 + 
2, tmp3); } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/keystoretls/KeyStoreSSLContext.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/keystoretls/KeyStoreSSLContext.java index d35fbc37e605b..7b06a33601bef 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/keystoretls/KeyStoreSSLContext.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/keystoretls/KeyStoreSSLContext.java @@ -22,7 +22,6 @@ import com.google.common.base.Strings; import io.netty.handler.ssl.util.InsecureTrustManagerFactory; import java.io.FileInputStream; -import java.io.FileNotFoundException; import java.io.IOException; import java.security.GeneralSecurityException; import java.security.KeyStore; @@ -35,12 +34,10 @@ import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLException; import javax.net.ssl.TrustManagerFactory; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.common.util.SecurityUtility; -import org.eclipse.jetty.util.ssl.SslContextFactory; /** * KeyStoreSSLContext that mainly wrap a SSLContext to provide SSL context for both webservice and netty. 
@@ -66,22 +63,22 @@ public enum Mode { @Getter private final Mode mode; - private String sslProviderString; - private String keyStoreTypeString; - private String keyStorePath; - private String keyStorePassword; - private boolean allowInsecureConnection; - private String trustStoreTypeString; - private String trustStorePath; - private String trustStorePassword; - private boolean needClientAuth; - private Set ciphers; - private Set protocols; + private final String sslProviderString; + private final String keyStoreTypeString; + private final String keyStorePath; + private final String keyStorePassword; + private final boolean allowInsecureConnection; + private final String trustStoreTypeString; + private final String trustStorePath; + private final String trustStorePassword; + private final boolean needClientAuth; + private final Set ciphers; + private final Set protocols; private SSLContext sslContext; - private String protocol = DEFAULT_SSL_PROTOCOL; - private String kmfAlgorithm = DEFAULT_SSL_KEYMANGER_ALGORITHM; - private String tmfAlgorithm = DEFAULT_SSL_TRUSTMANAGER_ALGORITHM; + private final String protocol = DEFAULT_SSL_PROTOCOL; + private final String kmfAlgorithm = DEFAULT_SSL_KEYMANGER_ALGORITHM; + private final String tmfAlgorithm = DEFAULT_SSL_TRUSTMANAGER_ALGORITHM; // only init vars, before using it, need to call createSSLContext to create ssl context. 
public KeyStoreSSLContext(Mode mode, @@ -109,8 +106,6 @@ public KeyStoreSSLContext(Mode mode, this.trustStorePath = trustStorePath; this.trustStorePassword = trustStorePassword; this.needClientAuth = requireTrustedClientCertOnConnect; - this.ciphers = ciphers; - this.protocols = protocols; if (protocols != null && protocols.size() > 0) { this.protocols = protocols; @@ -129,8 +124,10 @@ public KeyStoreSSLContext(Mode mode, public SSLContext createSSLContext() throws GeneralSecurityException, IOException { SSLContext sslContext; - if (sslProviderString != null) { - sslContext = SSLContext.getInstance(protocol, sslProviderString); + + Provider provider = SecurityUtility.resolveProvider(sslProviderString); + if (provider != null) { + sslContext = SSLContext.getInstance(protocol, provider); } else { sslContext = SSLContext.getInstance(protocol); } @@ -153,8 +150,8 @@ public SSLContext createSSLContext() throws GeneralSecurityException, IOExceptio if (this.allowInsecureConnection) { trustManagerFactory = InsecureTrustManagerFactory.INSTANCE; } else { - trustManagerFactory = sslProviderString != null - ? TrustManagerFactory.getInstance(tmfAlgorithm, sslProviderString) + trustManagerFactory = provider != null + ? 
TrustManagerFactory.getInstance(tmfAlgorithm, provider) : TrustManagerFactory.getInstance(tmfAlgorithm); KeyStore trustStore = KeyStore.getInstance(trustStoreTypeString); char[] passwordChars = trustStorePassword.toCharArray(); @@ -189,7 +186,9 @@ public SSLEngine createSSLEngine(String peerHost, int peerPort) { private SSLEngine configureSSLEngine(SSLEngine sslEngine) { sslEngine.setEnabledProtocols(protocols.toArray(new String[0])); - sslEngine.setEnabledCipherSuites(sslEngine.getSupportedCipherSuites()); + if (this.ciphers != null) { + sslEngine.setEnabledCipherSuites(this.ciphers.toArray(new String[0])); + } if (this.mode == Mode.SERVER) { sslEngine.setNeedClientAuth(this.needClientAuth); @@ -210,7 +209,7 @@ public static KeyStoreSSLContext createClientKeyStoreSslContext(String sslProvid String trustStorePassword, Set ciphers, Set protocols) - throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { + throws GeneralSecurityException, IOException { KeyStoreSSLContext keyStoreSSLContext = new KeyStoreSSLContext(Mode.CLIENT, sslProviderString, keyStoreTypeString, @@ -240,7 +239,7 @@ public static KeyStoreSSLContext createServerKeyStoreSslContext(String sslProvid boolean requireTrustedClientCertOnConnect, Set ciphers, Set protocols) - throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { + throws GeneralSecurityException, IOException { KeyStoreSSLContext keyStoreSSLContext = new KeyStoreSSLContext(Mode.SERVER, sslProviderString, keyStoreTypeString, @@ -268,7 +267,7 @@ public static SSLContext createServerSslContext(String sslProviderString, String trustStorePath, String trustStorePassword, boolean requireTrustedClientCertOnConnect) - throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { + throws GeneralSecurityException, IOException { return createServerKeyStoreSslContext( sslProviderString, @@ -295,7 +294,7 @@ public static SSLContext createClientSslContext(String 
sslProviderString, String trustStorePassword, Set ciphers, Set protocol) - throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { + throws GeneralSecurityException, IOException { KeyStoreSSLContext keyStoreSSLContext = new KeyStoreSSLContext(Mode.CLIENT, sslProviderString, keyStoreTypeString, @@ -319,7 +318,7 @@ public static SSLContext createClientSslContext(String keyStoreTypeString, String trustStoreTypeString, String trustStorePath, String trustStorePassword) - throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { + throws GeneralSecurityException, IOException { KeyStoreSSLContext keyStoreSSLContext = new KeyStoreSSLContext(Mode.CLIENT, null, keyStoreTypeString, @@ -335,48 +334,4 @@ public static SSLContext createClientSslContext(String keyStoreTypeString, return keyStoreSSLContext.createSSLContext(); } - - // for web server. autoRefresh is default true. - public static SslContextFactory createSslContextFactory(String sslProviderString, - String keyStoreTypeString, - String keyStore, - String keyStorePassword, - boolean allowInsecureConnection, - String trustStoreTypeString, - String trustStore, - String trustStorePassword, - boolean requireTrustedClientCertOnConnect, - long certRefreshInSec) - throws GeneralSecurityException, SSLException, FileNotFoundException, IOException { - SslContextFactory sslCtxFactory; - - if (sslProviderString == null) { - Provider provider = SecurityUtility.CONSCRYPT_PROVIDER; - if (provider != null) { - sslProviderString = provider.getName(); - } - } - - sslCtxFactory = new SslContextFactoryWithAutoRefresh( - sslProviderString, - keyStoreTypeString, - keyStore, - keyStorePassword, - allowInsecureConnection, - trustStoreTypeString, - trustStore, - trustStorePassword, - requireTrustedClientCertOnConnect, - certRefreshInSec); - - if (requireTrustedClientCertOnConnect) { - sslCtxFactory.setNeedClientAuth(true); - } else { - sslCtxFactory.setWantClientAuth(true); - } - 
sslCtxFactory.setTrustAll(true); - - return sslCtxFactory; - } -} - +} \ No newline at end of file diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/keystoretls/SslContextFactoryWithAutoRefresh.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/keystoretls/SslContextFactoryWithAutoRefresh.java deleted file mode 100644 index 0882a3a0cb131..0000000000000 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/keystoretls/SslContextFactoryWithAutoRefresh.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.pulsar.common.util.keystoretls; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.security.GeneralSecurityException; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLException; -import org.eclipse.jetty.util.ssl.SslContextFactory; - -/** - * SslContextFactoryWithAutoRefresh that create SSLContext for web server, and refresh in time. 
- */ -public class SslContextFactoryWithAutoRefresh extends SslContextFactory { - private final NetSslContextBuilder sslCtxRefresher; - - public SslContextFactoryWithAutoRefresh(String sslProviderString, - String keyStoreTypeString, - String keyStore, - String keyStorePassword, - boolean allowInsecureConnection, - String trustStoreTypeString, - String trustStore, - String trustStorePassword, - boolean requireTrustedClientCertOnConnect, - long certRefreshInSec) - throws SSLException, FileNotFoundException, GeneralSecurityException, IOException { - super(); - sslCtxRefresher = new NetSslContextBuilder( - sslProviderString, - keyStoreTypeString, - keyStore, - keyStorePassword, - allowInsecureConnection, - trustStoreTypeString, - trustStore, - trustStorePassword, - requireTrustedClientCertOnConnect, - certRefreshInSec); - if (sslProviderString != null) { - setProvider(sslProviderString); - } - } - - @Override - public SSLContext getSslContext() { - return sslCtxRefresher.get(); - } -} diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/netty/DnsResolverUtil.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/netty/DnsResolverUtil.java new file mode 100644 index 0000000000000..5f1fe5a1ea6e1 --- /dev/null +++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/netty/DnsResolverUtil.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.common.util.netty; + +import io.netty.resolver.dns.DnsNameResolverBuilder; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class DnsResolverUtil { + private static final int MIN_TTL = 0; + private static final int TTL; + private static final int NEGATIVE_TTL; + + // default TTL value when JDK setting is "forever" (-1) + private static final int DEFAULT_TTL = 60; + + // default negative TTL value when JDK setting is "forever" (-1) + private static final int DEFAULT_NEGATIVE_TTL = 10; + + static { + int ttl = DEFAULT_TTL; + int negativeTtl = DEFAULT_NEGATIVE_TTL; + try { + // use reflection to call sun.net.InetAddressCachePolicy's get and getNegative methods for getting + // effective JDK settings for DNS caching + Class inetAddressCachePolicyClass = Class.forName("sun.net.InetAddressCachePolicy"); + Method getTTLMethod = inetAddressCachePolicyClass.getMethod("get"); + ttl = (Integer) getTTLMethod.invoke(null); + Method getNegativeTTLMethod = inetAddressCachePolicyClass.getMethod("getNegative"); + negativeTtl = (Integer) getNegativeTTLMethod.invoke(null); + } catch (NoSuchMethodException | ClassNotFoundException | InvocationTargetException + | IllegalAccessException e) { + log.warn("Cannot get DNS TTL settings from sun.net.InetAddressCachePolicy class", e); + } + TTL = ttl <= 0 ? DEFAULT_TTL : ttl; + NEGATIVE_TTL = negativeTtl < 0 ? 
DEFAULT_NEGATIVE_TTL : negativeTtl; + } + + private DnsResolverUtil() { + // utility class with static methods, prevent instantiation + } + + /** + * Configure Netty's {@link DnsNameResolverBuilder}'s ttl and negativeTtl to match the JDK's DNS caching settings. + * If the JDK setting for TTL is forever (-1), the TTL will be set to 60 seconds. + * + * @param dnsNameResolverBuilder The Netty {@link DnsNameResolverBuilder} instance to apply the settings + */ + public static void applyJdkDnsCacheSettings(DnsNameResolverBuilder dnsNameResolverBuilder) { + dnsNameResolverBuilder.ttl(MIN_TTL, TTL); + dnsNameResolverBuilder.negativeTtl(NEGATIVE_TTL); + } +} diff --git a/pulsar-common/src/main/java/org/apache/pulsar/policies/data/loadbalancer/AdvertisedListener.java b/pulsar-common/src/main/java/org/apache/pulsar/policies/data/loadbalancer/AdvertisedListener.java index a310974ffa358..b73fdab4483be 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/policies/data/loadbalancer/AdvertisedListener.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/policies/data/loadbalancer/AdvertisedListener.java @@ -44,4 +44,29 @@ public class AdvertisedListener { @Setter // the broker service uri with ssl private URI brokerServiceUrlTls; + + // + @Getter + @Setter + // the broker service uri without ssl + private URI brokerHttpUrl; + // + @Getter + @Setter + // the broker service uri with ssl + private URI brokerHttpsUrl; + + public boolean hasUriForProtocol(String protocol) { + if ("pulsar".equals(protocol)) { + return brokerServiceUrl != null; + } else if ("pulsar+ssl".equals(protocol)) { + return brokerServiceUrlTls != null; + } else if ("http".equals(protocol)) { + return brokerHttpUrl != null; + } else if ("https".equals(protocol)) { + return brokerHttpsUrl != null; + } else { + return false; + } + } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/policies/data/loadbalancer/BrokerUsage.java 
b/pulsar-common/src/main/java/org/apache/pulsar/policies/data/loadbalancer/BrokerUsage.java index 57915029d6f5a..6fd7d60fbd9c3 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/policies/data/loadbalancer/BrokerUsage.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/policies/data/loadbalancer/BrokerUsage.java @@ -56,13 +56,13 @@ public static BrokerUsage populateFrom(Map metrics) { BrokerUsage brokerUsage = null; if (metrics.containsKey("brk_conn_cnt")) { brokerUsage = new BrokerUsage(); - brokerUsage.connectionCount = ((Long) metrics.get("brk_conn_cnt")).longValue(); + brokerUsage.connectionCount = (Long) metrics.get("brk_conn_cnt"); } if (metrics.containsKey("brk_repl_conn_cnt")) { if (brokerUsage == null) { brokerUsage = new BrokerUsage(); } - brokerUsage.replicationConnectionCount = ((Long) metrics.get("brk_repl_conn_cnt")).longValue(); + brokerUsage.replicationConnectionCount = (Long) metrics.get("brk_repl_conn_cnt"); } return brokerUsage; } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/policies/data/loadbalancer/LocalBrokerData.java b/pulsar-common/src/main/java/org/apache/pulsar/policies/data/loadbalancer/LocalBrokerData.java index 595bbe9d0d269..75f32eaa82e8f 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/policies/data/loadbalancer/LocalBrokerData.java +++ b/pulsar-common/src/main/java/org/apache/pulsar/policies/data/loadbalancer/LocalBrokerData.java @@ -235,7 +235,8 @@ private void updateBundleData(final Map bundleStat } public double getMaxResourceUsage() { - return max(cpu.percentUsage(), memory.percentUsage(), directMemory.percentUsage(), bandwidthIn.percentUsage(), + // does not consider memory because it is noisy by gc. 
+ return max(cpu.percentUsage(), directMemory.percentUsage(), bandwidthIn.percentUsage(), bandwidthOut.percentUsage()) / 100; } @@ -255,6 +256,16 @@ public double getMaxResourceUsageWithWeight(final double cpuWeight, final double bandwidthOut.percentUsage() * bandwidthOutWeight) / 100; } + public double getMaxResourceUsageWithWeightWithinLimit(final double cpuWeight, final double memoryWeight, + final double directMemoryWeight, + final double bandwidthInWeight, + final double bandwidthOutWeight) { + return maxWithinLimit(100.0d, + cpu.percentUsage() * cpuWeight, memory.percentUsage() * memoryWeight, + directMemory.percentUsage() * directMemoryWeight, bandwidthIn.percentUsage() * bandwidthInWeight, + bandwidthOut.percentUsage() * bandwidthOutWeight) / 100; + } + private static double max(double... args) { double max = Double.NEGATIVE_INFINITY; @@ -279,6 +290,16 @@ private static float max(float...args) { return max; } + private static double maxWithinLimit(double limit, double...args) { + double max = 0.0; + for (double d : args) { + if (d > max && d <= limit) { + max = d; + } + } + return max; + } + public String getLoadReportType() { return loadReportType; } diff --git a/pulsar-common/src/main/proto/PulsarApi.proto b/pulsar-common/src/main/proto/PulsarApi.proto index 34ad37b4ee2f6..ec06eae26f645 100644 --- a/pulsar-common/src/main/proto/PulsarApi.proto +++ b/pulsar-common/src/main/proto/PulsarApi.proto @@ -699,6 +699,9 @@ message CommandConsumerStatsResponse { /// Number of messages in the subscription backlog optional uint64 msgBacklog = 15; + + /// Total rate of messages ack. 
msg/s + optional double messageAckRate = 16; } message CommandGetLastMessageId { diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/api/raw/RawMessageImplTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/api/raw/RawMessageImplTest.java index 037a6ffbe4c95..a602cef484350 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/api/raw/RawMessageImplTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/api/raw/RawMessageImplTest.java @@ -18,15 +18,20 @@ */ package org.apache.pulsar.common.api.raw; +import static java.util.Collections.singletonList; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; +import com.google.common.collect.ImmutableMap; import io.netty.buffer.ByteBuf; +import java.util.Map; +import org.apache.pulsar.common.api.proto.KeyValue; +import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.api.proto.SingleMessageMetadata; -import org.mockito.Mockito; import org.testng.annotations.Test; -import java.util.Map; - -import static org.testng.Assert.assertEquals; - public class RawMessageImplTest { private static final String HARD_CODE_KEY = "__pfn_input_topic__"; @@ -38,7 +43,7 @@ public class RawMessageImplTest { @Test public void testGetProperties() { ReferenceCountedMessageMetadata refCntMsgMetadata = - ReferenceCountedMessageMetadata.get(Mockito.mock(ByteBuf.class)); + ReferenceCountedMessageMetadata.get(mock(ByteBuf.class)); SingleMessageMetadata singleMessageMetadata = new SingleMessageMetadata(); singleMessageMetadata.addProperty().setKey(HARD_CODE_KEY).setValue(KEY_VALUE_FIRST); singleMessageMetadata.addProperty().setKey(HARD_CODE_KEY).setValue(KEY_VALUE_SECOND); @@ -50,4 +55,42 @@ public void testGetProperties() { assertEquals(KEY_VALUE_SECOND, properties.get(HARD_CODE_KEY)); 
assertEquals(HARD_CODE_KEY_ID_VALUE, properties.get(HARD_CODE_KEY_ID)); } + + @Test + public void testNonBatchedMessage() { + MessageMetadata messageMetadata = new MessageMetadata(); + messageMetadata.setPartitionKeyB64Encoded(true); + messageMetadata.addAllProperties(singletonList(new KeyValue().setKey("key1").setValue("value1"))); + messageMetadata.setEventTime(100L); + + ReferenceCountedMessageMetadata refCntMsgMetadata = mock(ReferenceCountedMessageMetadata.class); + when(refCntMsgMetadata.getMetadata()).thenReturn(messageMetadata); + + // Non-batched message's singleMessageMetadata is null + RawMessage msg = RawMessageImpl.get(refCntMsgMetadata, null, null, 0, 0, 0); + assertTrue(msg.hasBase64EncodedKey()); + assertEquals(msg.getProperties(), ImmutableMap.of("key1", "value1")); + assertEquals(msg.getEventTime(), 100L); + } + + @Test + public void testBatchedMessage() { + MessageMetadata messageMetadata = new MessageMetadata(); + messageMetadata.setPartitionKeyB64Encoded(true); + messageMetadata.addAllProperties(singletonList(new KeyValue().setKey("key1").setValue("value1"))); + messageMetadata.setEventTime(100L); + + ReferenceCountedMessageMetadata refCntMsgMetadata = mock(ReferenceCountedMessageMetadata.class); + when(refCntMsgMetadata.getMetadata()).thenReturn(messageMetadata); + + SingleMessageMetadata singleMessageMetadata = new SingleMessageMetadata(); + singleMessageMetadata.setPartitionKeyB64Encoded(false); + singleMessageMetadata.addAllProperties(singletonList(new KeyValue().setKey("key2").setValue("value2"))); + singleMessageMetadata.setEventTime(200L); + + RawMessage msg = RawMessageImpl.get(refCntMsgMetadata, singleMessageMetadata, null, 0, 0, 0); + assertFalse(msg.hasBase64EncodedKey()); + assertEquals(msg.getProperties(), ImmutableMap.of("key2", "value2")); + assertEquals(msg.getEventTime(), 200L); + } } \ No newline at end of file diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/compression/CommandsTest.java 
b/pulsar-common/src/test/java/org/apache/pulsar/common/compression/CommandsTest.java index 24d34ac547fb7..207c6202426c8 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/compression/CommandsTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/compression/CommandsTest.java @@ -18,22 +18,23 @@ */ package org.apache.pulsar.common.compression; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.pulsar.common.protocol.Commands.serializeMetadataAndPayload; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; - import com.scurrilous.circe.checksum.Crc32cIntChecksum; - import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; - import java.io.IOException; - +import java.util.Base64; +import io.netty.util.ReferenceCountUtil; import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.api.proto.MessageMetadata; import org.apache.pulsar.common.protocol.ByteBufPair; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.Commands.ChecksumType; +import org.testng.Assert; import org.testng.annotations.Test; public class CommandsTest { @@ -93,5 +94,35 @@ private int computeChecksum(MessageMetadata msgMetadata, ByteBuf compressedPaylo return computedChecksum; } - + @Test + public void testPeekStickyKey() { + String message = "msg-1"; + String partitionedKey = "key1"; + MessageMetadata messageMetadata2 = new MessageMetadata() + .setSequenceId(1) + .setProducerName("testProducer") + .setPartitionKey(partitionedKey) + .setPartitionKeyB64Encoded(false) + .setPublishTime(System.currentTimeMillis()); + ByteBuf byteBuf = serializeMetadataAndPayload(Commands.ChecksumType.Crc32c, messageMetadata2, + Unpooled.copiedBuffer(message.getBytes(UTF_8))); + byte[] bytes = Commands.peekStickyKey(byteBuf, "topic-1", "sub-1"); + String key = new String(bytes); + 
Assert.assertEquals(partitionedKey, key); + ReferenceCountUtil.safeRelease(byteBuf); + // test 64 encoded + String partitionedKey2 = Base64.getEncoder().encodeToString("key2".getBytes(UTF_8)); + MessageMetadata messageMetadata = new MessageMetadata() + .setSequenceId(1) + .setProducerName("testProducer") + .setPartitionKey(partitionedKey2) + .setPartitionKeyB64Encoded(true) + .setPublishTime(System.currentTimeMillis()); + ByteBuf byteBuf2 = serializeMetadataAndPayload(Commands.ChecksumType.Crc32c, messageMetadata, + Unpooled.copiedBuffer(message.getBytes(UTF_8))); + byte[] bytes2 = Commands.peekStickyKey(byteBuf2, "topic-2", "sub-2"); + String key2 = Base64.getEncoder().encodeToString(bytes2);; + Assert.assertEquals(partitionedKey2, key2); + ReferenceCountUtil.safeRelease(byteBuf2); + } } diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/AutoTopicCreationOverrideTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/AutoTopicCreationOverrideTest.java index 5092d433d0db7..66769f0bfbc18 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/AutoTopicCreationOverrideTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/policies/data/AutoTopicCreationOverrideTest.java @@ -32,7 +32,7 @@ public void testValidOverrideNonPartitioned() { .allowAutoTopicCreation(true) .topicType(TopicType.NON_PARTITIONED.toString()) .build(); - assertTrue(AutoTopicCreationOverrideImpl.isValidOverride(override)); + assertTrue(AutoTopicCreationOverrideImpl.validateOverride(override).isSuccess()); } @Test @@ -42,7 +42,7 @@ public void testValidOverridePartitioned() { .topicType(TopicType.PARTITIONED.toString()) .defaultNumPartitions(2) .build(); - assertTrue(AutoTopicCreationOverrideImpl.isValidOverride(override)); + assertTrue(AutoTopicCreationOverrideImpl.validateOverride(override).isSuccess()); } @Test @@ -51,7 +51,7 @@ public void testInvalidTopicType() { .allowAutoTopicCreation(true) 
.topicType("aaa") .build(); - assertFalse(AutoTopicCreationOverrideImpl.isValidOverride(override)); + assertFalse(AutoTopicCreationOverrideImpl.validateOverride(override).isSuccess()); } @Test @@ -61,7 +61,7 @@ public void testNumPartitionsTooLow() { .topicType(TopicType.PARTITIONED.toString()) .defaultNumPartitions(0) .build(); - assertFalse(AutoTopicCreationOverrideImpl.isValidOverride(override)); + assertFalse(AutoTopicCreationOverrideImpl.validateOverride(override).isSuccess()); } @Test @@ -70,7 +70,7 @@ public void testNumPartitionsNotSet() { .allowAutoTopicCreation(true) .topicType(TopicType.PARTITIONED.toString()) .build(); - assertFalse(AutoTopicCreationOverrideImpl.isValidOverride(override)); + assertFalse(AutoTopicCreationOverrideImpl.validateOverride(override).isSuccess()); } @Test @@ -80,6 +80,6 @@ public void testNumPartitionsOnNonPartitioned() { .topicType(TopicType.NON_PARTITIONED.toString()) .defaultNumPartitions(2) .build(); - assertFalse(AutoTopicCreationOverrideImpl.isValidOverride(override)); + assertFalse(AutoTopicCreationOverrideImpl.validateOverride(override).isSuccess()); } } diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/FieldParserTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/FieldParserTest.java index a0853116295ea..a8a3fee5cfdca 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/FieldParserTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/FieldParserTest.java @@ -19,9 +19,13 @@ package org.apache.pulsar.common.util; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; +import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.Set; import org.testng.annotations.Test; import com.google.common.collect.Maps; @@ -35,6 +39,33 @@ public void testMap() { properties.put("stringStringMap", "key1=value1,key2=value2"); properties.put("stringIntMap", "key1=1,key2=2"); 
properties.put("longStringMap", "1=value1,2=value2"); + + MyConfig config = new MyConfig(); + FieldParser.update(properties, config); + assertEquals(config.name, "config"); + assertEquals(config.stringStringMap.get("key1"), "value1"); + assertEquals(config.stringStringMap.get("key2"), "value2"); + + assertEquals((int) config.stringIntMap.get("key1"), 1); + assertEquals((int) config.stringIntMap.get("key2"), 2); + + assertEquals(config.longStringMap.get(1L), "value1"); + assertEquals(config.longStringMap.get(2L), "value2"); + + } + + @Test + public void testWithBlankVallueConfig() { + Map properties = new HashMap<>(); + properties.put("name", " config "); + properties.put("stringStringMap", "key1=value1 , key2= value2 "); + properties.put("stringIntMap", "key1 = 1, key2 = 2 "); + properties.put("longStringMap", " 1 =value1 ,2 =value2 "); + properties.put("longList", " 1, 3, 8 , 0 ,9 "); + properties.put("stringList", " aa, bb , cc, ee "); + properties.put("longSet", " 1, 3, 8 , 0 , 3, 1 ,9 "); + properties.put("stringSet", " aa, bb , cc, ee , bb, aa "); + MyConfig config = new MyConfig(); FieldParser.update(properties, config); assertEquals(config.name, "config"); @@ -47,6 +78,11 @@ public void testMap() { assertEquals(config.longStringMap.get(1L), "value1"); assertEquals(config.longStringMap.get(2L), "value2"); + assertEquals((long)config.longList.get(2), 8); + assertEquals(config.stringList.get(1), "bb"); + + assertTrue(config.longSet.contains(3L)); + assertTrue(config.stringSet.contains("bb")); } public static class MyConfig { @@ -54,6 +90,10 @@ public static class MyConfig { public Map stringStringMap; public Map stringIntMap; public Map longStringMap; + public List longList; + public List stringList; + public Set longSet; + public Set stringSet; } } diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/FutureUtilTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/FutureUtilTest.java index b9458bf8e1efd..0de407676567e 100644 
--- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/FutureUtilTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/FutureUtilTest.java @@ -25,13 +25,18 @@ import java.io.PrintWriter; import java.io.StringWriter; import java.time.Duration; +import java.util.Optional; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeoutException; import lombok.Cleanup; +import org.assertj.core.util.Lists; import org.testng.annotations.Test; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; public class FutureUtilTest { @@ -91,4 +96,44 @@ public void testCreatingFutureWithTimeoutHandling() { assertEquals(executionException.getCause(), e); } } + + @Test + public void testWaitForAny() { + CompletableFuture f1 = new CompletableFuture<>(); + CompletableFuture f2 = new CompletableFuture<>(); + CompletableFuture f3 = new CompletableFuture<>(); + CompletableFuture f4 = new CompletableFuture<>(); + f1.complete("1"); + f2.complete("2"); + f3.complete("3"); + f4.complete("4"); + CompletableFuture> ret = FutureUtil.waitForAny(Lists.newArrayList(f1, f2, f3, f4), p -> p.equals("3")); + assertEquals(ret.join().get(), "3"); + // test not matched predicate result + CompletableFuture f5 = new CompletableFuture<>(); + CompletableFuture f6 = new CompletableFuture<>(); + f5.complete("5"); + f6.complete("6"); + ret = FutureUtil.waitForAny(Lists.newArrayList(f5, f6), p -> p.equals("3")); + assertFalse(ret.join().isPresent()); + // test one complete, others are cancelled. 
+ CompletableFuture f55 = new CompletableFuture<>(); + CompletableFuture f66 = new CompletableFuture<>(); + f55.complete("55"); + ret = FutureUtil.waitForAny(Lists.newArrayList(f55, f66), p -> p.equals("55")); + assertTrue(ret.join().isPresent()); + assertTrue(f66.isCancelled()); + // test with exception + CompletableFuture f7 = new CompletableFuture<>(); + CompletableFuture f8 = new CompletableFuture<>(); + f8.completeExceptionally(new RuntimeException("f7 exception")); + f8.completeExceptionally(new RuntimeException("f8 exception")); + ret = FutureUtil.waitForAny(Lists.newArrayList(f7, f8), p -> p.equals("3")); + try { + ret.join(); + fail("Should have failed"); + } catch (CompletionException ex) { + assertTrue(ex.getCause() instanceof RuntimeException); + } + } } \ No newline at end of file diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/ObjectMapperFactoryTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/ObjectMapperFactoryTest.java index dc8aa8b43fcf3..f9e372941ef0f 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/ObjectMapperFactoryTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/ObjectMapperFactoryTest.java @@ -20,7 +20,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import lombok.ToString; -import org.apache.pulsar.common.policies.data.BacklogQuota; import org.apache.pulsar.common.policies.data.ResourceQuota; import org.apache.pulsar.common.policies.data.impl.BacklogQuotaImpl; import org.apache.pulsar.common.stats.Metrics; @@ -28,32 +27,6 @@ import org.testng.annotations.Test; public class ObjectMapperFactoryTest { - @Test - public void testBacklogQuotaMixIn() { - ObjectMapper objectMapper = ObjectMapperFactory.getThreadLocal(); - String json = "{\"limit\":10,\"limitTime\":0,\"policy\":\"producer_request_hold\"}"; - try { - BacklogQuota backlogQuota = objectMapper.readValue(json, BacklogQuota.class); - Assert.assertEquals(backlogQuota.getLimitSize(), 10); - 
Assert.assertEquals(backlogQuota.getLimitTime(), 0); - Assert.assertEquals(backlogQuota.getPolicy(), BacklogQuota.RetentionPolicy.producer_request_hold); - } catch (Exception ex) { - Assert.fail("shouldn't have thrown exception", ex); - } - - try { - String expectJson = "{\"limitSize\":10,\"limitTime\":0,\"policy\":\"producer_request_hold\"}"; - BacklogQuota backlogQuota = BacklogQuota.builder() - .limitSize(10) - .limitTime(0) - .retentionPolicy(BacklogQuota.RetentionPolicy.producer_request_hold) - .build(); - String writeJson = objectMapper.writeValueAsString(backlogQuota); - Assert.assertEquals(expectJson, writeJson); - } catch (Exception ex) { - Assert.fail("shouldn't have thrown exception", ex); - } - } @Test public void testResourceQuotaMixIn() { diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/RateLimiterTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/RateLimiterTest.java index 788ab749390db..57090fcc7b7e0 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/RateLimiterTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/RateLimiterTest.java @@ -133,6 +133,24 @@ public void testTryAcquire() { rate.close(); } + @Test + public void testTryAcquireMoreThanPermits() { + final long rateTimeMSec = 1000; + RateLimiter rate = RateLimiter.builder().permits(3).rateTime(rateTimeMSec).timeUnit(TimeUnit.MILLISECONDS) + .build(); + assertTrue(rate.tryAcquire(2)); + assertEquals(rate.getAvailablePermits(), 1); + + //try to acquire failed, not decrease availablePermits. 
+ assertFalse(rate.tryAcquire(2)); + assertEquals(rate.getAvailablePermits(), 1); + + assertTrue(rate.tryAcquire(1)); + assertEquals(rate.getAvailablePermits(), 0); + + rate.close(); + } + @Test public void testMultipleTryAcquire() { final long rateTimeMSec = 1000; @@ -189,7 +207,7 @@ public void testDispatchRate() throws Exception { Thread.sleep(rateTimeMSec); // check after three rate-time: acquiredPermits is 0 - assertEquals(rate.getAvailablePermits() > 0, true); + assertTrue(rate.getAvailablePermits() > 0); rate.close(); } diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/RunnablesTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/RunnablesTest.java new file mode 100644 index 0000000000000..6dfbe1beececf --- /dev/null +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/RunnablesTest.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.pulsar.common.util; + +import org.testng.annotations.Test; + +public class RunnablesTest { + + @Test + public void shouldCatchAndLogException() { + Runnables.catchingAndLoggingThrowables(() -> { + throw new RuntimeException(); + }).run(); + } +} \ No newline at end of file diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/TrustManagerProxyTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/TrustManagerProxyTest.java new file mode 100644 index 0000000000000..33163f6e6a568 --- /dev/null +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/TrustManagerProxyTest.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.common.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import com.google.common.io.Resources; +import java.security.cert.X509Certificate; +import java.util.Arrays; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +public class TrustManagerProxyTest { + @DataProvider(name = "caDataProvider") + public static Object[][] caDataProvider() { + return new Object[][]{ + {"ca/multiple-ca.pem", 2}, + {"ca/single-ca.pem", 1} + }; + } + + @Test(dataProvider = "caDataProvider") + public void testLoadCA(String path, int count) { + String caPath = Resources.getResource(path).getPath(); + + ScheduledExecutorService scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); + try { + TrustManagerProxy trustManagerProxy = + new TrustManagerProxy(caPath, 120, scheduledExecutor); + X509Certificate[] x509Certificates = trustManagerProxy.getAcceptedIssuers(); + assertNotNull(x509Certificates); + assertEquals(Arrays.stream(x509Certificates).count(), count); + } finally { + scheduledExecutor.shutdown(); + } + } +} diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java index 14d8395ae8c8a..205cf91b47d12 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java @@ -48,21 +48,29 @@ public class ConcurrentLongHashMapTest { @Test public void testConstructor() { try { - new ConcurrentLongHashMap(0); + ConcurrentLongHashMap.newBuilder() + .expectedItems(0) + .build(); fail("should have thrown exception"); } catch (IllegalArgumentException e) { // ok } try { 
- new ConcurrentLongHashMap(16, 0); + ConcurrentLongHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(0) + .build(); fail("should have thrown exception"); } catch (IllegalArgumentException e) { // ok } try { - new ConcurrentLongHashMap(4, 8); + ConcurrentLongHashMap.newBuilder() + .expectedItems(4) + .concurrencyLevel(8) + .build(); fail("should have thrown exception"); } catch (IllegalArgumentException e) { // ok @@ -71,7 +79,9 @@ public void testConstructor() { @Test public void simpleInsertions() { - ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(16); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(16) + .build(); assertTrue(map.isEmpty()); assertNull(map.put(1, "one")); @@ -97,9 +107,83 @@ public void simpleInsertions() { assertEquals(map.size(), 3); } + @Test + public void testReduceUnnecessaryExpansions() { + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .build(); + assertNull(map.put(1, "v1")); + assertNull(map.put(2, "v2")); + assertNull(map.put(3, "v3")); + assertNull(map.put(4, "v4")); + + assertTrue(map.remove(1, "v1")); + assertTrue(map.remove(2, "v2")); + assertTrue(map.remove(3, "v3")); + assertTrue(map.remove(4, "v4")); + + assertEquals(0, map.getUsedBucketCount()); + } + + @Test + public void testClear() { + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertTrue(map.capacity() == 4); + + assertNull(map.put(1, "v1")); + assertNull(map.put(2, "v2")); + assertNull(map.put(3, "v3")); + + assertTrue(map.capacity() == 8); + map.clear(); + assertTrue(map.capacity() == 4); + } + + @Test + public void testExpandAndShrink() { + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertTrue(map.capacity() == 4); + + 
assertNull(map.put(1, "v1")); + assertNull(map.put(2, "v2")); + assertNull(map.put(3, "v3")); + + // expand hashmap + assertTrue(map.capacity() == 8); + + assertTrue(map.remove(1, "v1")); + // not shrink + assertTrue(map.capacity() == 8); + assertTrue(map.remove(2, "v2")); + // shrink hashmap + assertTrue(map.capacity() == 4); + + // expand hashmap + assertNull(map.put(4, "v4")); + assertNull(map.put(5, "v5")); + assertTrue(map.capacity() == 8); + + //verify that the map does not keep shrinking at every remove() operation + assertNull(map.put(6, "v6")); + assertTrue(map.remove(6, "v6")); + assertTrue(map.capacity() == 8); + } + @Test public void testRemove() { - ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .build(); assertTrue(map.isEmpty()); assertNull(map.put(1, "one")); @@ -115,7 +199,10 @@ public void testRemove() { @Test public void testNegativeUsedBucketCount() { - ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(16, 1); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); map.put(0, "zero"); assertEquals(1, map.getUsedBucketCount()); @@ -130,7 +217,10 @@ public void testNegativeUsedBucketCount() { @Test public void testRehashing() { int n = 16; - ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(n / 2, 1); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(n / 2) + .concurrencyLevel(1) + .build(); assertEquals(map.capacity(), n); assertEquals(map.size(), 0); @@ -145,7 +235,10 @@ public void testRehashing() { @Test public void testRehashingWithDeletes() { int n = 16; - ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(n / 2, 1); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(n / 2) + .concurrencyLevel(1) + .build(); assertEquals(map.capacity(), n); assertEquals(map.size(), 0); @@ -167,7 +260,8 @@ public void 
testRehashingWithDeletes() { @Test public void concurrentInsertions() throws Throwable { - ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .build(); @Cleanup("shutdownNow") ExecutorService executor = Executors.newCachedThreadPool(); @@ -201,7 +295,8 @@ public void concurrentInsertions() throws Throwable { @Test public void concurrentInsertionsAndReads() throws Throwable { - ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .build(); @Cleanup("shutdownNow") ExecutorService executor = Executors.newCachedThreadPool(); @@ -235,7 +330,10 @@ public void concurrentInsertionsAndReads() throws Throwable { @Test public void stressConcurrentInsertionsAndReads() throws Throwable { - ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(4, 1); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(4) + .concurrencyLevel(1) + .build(); @Cleanup("shutdownNow") ExecutorService executor = Executors.newCachedThreadPool(); final int writeThreads = 16; @@ -286,7 +384,8 @@ public void stressConcurrentInsertionsAndReads() throws Throwable { @Test public void testIteration() { - ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .build(); assertEquals(map.keys(), Collections.emptyList()); assertEquals(map.values(), Collections.emptyList()); @@ -330,7 +429,10 @@ public void testIteration() { @Test public void testHashConflictWithDeletion() { final int Buckets = 16; - ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(Buckets, 1); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(Buckets) + .concurrencyLevel(1) + .build(); // Pick 2 keys that fall into the same bucket long key1 = 1; @@ -363,7 +465,8 @@ public void testHashConflictWithDeletion() { @Test public void testPutIfAbsent() { - 
ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .build(); assertNull(map.putIfAbsent(1, "one")); assertEquals(map.get(1), "one"); @@ -373,7 +476,10 @@ public void testPutIfAbsent() { @Test public void testComputeIfAbsent() { - ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(16, 1); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); AtomicInteger counter = new AtomicInteger(); LongFunction provider = key -> counter.getAndIncrement(); @@ -395,7 +501,10 @@ public void testComputeIfAbsent() { static final int N = 100_000; public void benchConcurrentLongHashMap() throws Exception { - ConcurrentLongHashMap map = new ConcurrentLongHashMap<>(N, 1); + ConcurrentLongHashMap map = ConcurrentLongHashMap.newBuilder() + .expectedItems(N) + .concurrencyLevel(1) + .build(); for (long i = 0; i < Iterations; i++) { for (int j = 0; j < N; j++) { diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java new file mode 100644 index 0000000000000..98a96804d25e0 --- /dev/null +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java @@ -0,0 +1,427 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.common.util.collections; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.common.collect.Lists; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap.LongPair; +import org.junit.Test; + +/** + * Test the concurrent long-long pair hashmap class. 
+ */ +public class ConcurrentLongLongPairHashMapTest { + + @Test + public void testConstructor() { + try { + ConcurrentLongLongPairHashMap.newBuilder() + .expectedItems(0) + .build(); + fail("should have thrown exception"); + } catch (IllegalArgumentException e) { + // ok + } + + try { + ConcurrentLongLongPairHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(0) + .build(); + fail("should have thrown exception"); + } catch (IllegalArgumentException e) { + // ok + } + + try { + ConcurrentLongLongPairHashMap.newBuilder() + .expectedItems(4) + .concurrencyLevel(8) + .build(); + fail("should have thrown exception"); + } catch (IllegalArgumentException e) { + // ok + } + } + + @Test + public void simpleInsertions() { + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + .expectedItems(16) + .build(); + assertTrue(map.isEmpty()); + assertTrue(map.put(1, 1, 11, 11)); + assertFalse(map.isEmpty()); + + assertTrue(map.put(2, 2, 22, 22)); + assertTrue(map.put(3, 3, 33, 33)); + + assertEquals(map.size(), 3); + + assertEquals(map.get(1, 1), new LongPair(11, 11)); + assertEquals(map.size(), 3); + + assertTrue(map.remove(1, 1)); + assertEquals(map.size(), 2); + assertEquals(map.get(1, 1), null); + assertEquals(map.get(5, 5), null); + assertEquals(map.size(), 2); + + assertTrue(map.put(1, 1, 11, 11)); + assertEquals(map.size(), 3); + assertTrue(map.put(1, 1, 111, 111)); + assertEquals(map.size(), 3); + } + + @Test + public void testRemove() { + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap + .newBuilder() + .build(); + + assertTrue(map.isEmpty()); + assertTrue(map.put(1, 1, 11, 11)); + assertFalse(map.isEmpty()); + + assertFalse(map.remove(0, 0)); + assertFalse(map.remove(1, 1, 111, 111)); + + assertFalse(map.isEmpty()); + assertTrue(map.remove(1, 1, 11, 11)); + assertTrue(map.isEmpty()); + } + + @Test + public void testClear() { + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + 
.expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertTrue(map.capacity() == 4); + + assertTrue(map.put(1, 1, 11, 11)); + assertTrue(map.put(2, 2, 22, 22)); + assertTrue(map.put(3, 3, 33, 33)); + + assertTrue(map.capacity() == 8); + map.clear(); + assertTrue(map.capacity() == 4); + } + + @Test + public void testExpandAndShrink() { + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertTrue(map.put(1, 1, 11, 11)); + assertTrue(map.put(2, 2, 22, 22)); + assertTrue(map.put(3, 3, 33, 33)); + + // expand hashmap + assertTrue(map.capacity() == 8); + + assertTrue(map.remove(1, 1, 11, 11)); + // not shrink + assertTrue(map.capacity() == 8); + assertTrue(map.remove(2, 2, 22, 22)); + // shrink hashmap + assertTrue(map.capacity() == 4); + + // expand hashmap + assertTrue(map.put(4, 4, 44, 44)); + assertTrue(map.put(5, 5, 55, 55)); + assertTrue(map.capacity() == 8); + + //verify that the map does not keep shrinking at every remove() operation + assertTrue(map.put(6, 6, 66, 66)); + assertTrue(map.remove(6, 6, 66, 66)); + assertTrue(map.capacity() == 8); + } + + @Test + public void testNegativeUsedBucketCount() { + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); + + map.put(0, 0, 0, 0); + assertEquals(1, map.getUsedBucketCount()); + map.put(0, 0, 1, 1); + assertEquals(1, map.getUsedBucketCount()); + map.remove(0, 0); + assertEquals(0, map.getUsedBucketCount()); + map.remove(0, 0); + assertEquals(0, map.getUsedBucketCount()); + } + + @Test + public void testRehashing() { + int n = 16; + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + .expectedItems(n / 2) + .concurrencyLevel(1) + .build(); + assertEquals(map.capacity(), n); + assertEquals(map.size(), 0); + + for (int i = 0; i < n; 
i++) { + map.put(i, i, i, i); + } + + assertEquals(map.capacity(), 2 * n); + assertEquals(map.size(), n); + } + + @Test + public void testRehashingWithDeletes() { + int n = 16; + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + .expectedItems(n / 2) + .concurrencyLevel(1) + .build(); + assertEquals(map.capacity(), n); + assertEquals(map.size(), 0); + + for (int i = 0; i < n / 2; i++) { + map.put(i, i, i, i); + } + + for (int i = 0; i < n / 2; i++) { + map.remove(i, i); + } + + for (int i = n; i < (2 * n); i++) { + map.put(i, i, i, i); + } + + assertEquals(map.capacity(), 2 * n); + assertEquals(map.size(), n); + } + + @Test + public void concurrentInsertions() throws Throwable { + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + .build(); + ExecutorService executor = Executors.newCachedThreadPool(); + + final int nThreads = 16; + final int n = 100_000; + long value = 55; + + List> futures = new ArrayList<>(); + for (int i = 0; i < nThreads; i++) { + final int threadIdx = i; + + futures.add(executor.submit(() -> { + Random random = new Random(); + + for (int j = 0; j < n; j++) { + long key1 = Math.abs(random.nextLong()); + // Ensure keys are uniques + key1 -= key1 % (threadIdx + 1); + + long key2 = Math.abs(random.nextLong()); + // Ensure keys are uniques + key2 -= key2 % (threadIdx + 1); + + map.put(key1, key2, value, value); + } + })); + } + + for (Future future : futures) { + future.get(); + } + + assertEquals(map.size(), n * nThreads); + + executor.shutdown(); + } + + @Test + public void concurrentInsertionsAndReads() throws Throwable { + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + .build(); + ExecutorService executor = Executors.newCachedThreadPool(); + + final int nThreads = 16; + final int n = 100_000; + final long value = 55; + + List> futures = new ArrayList<>(); + for (int i = 0; i < nThreads; i++) { + final int threadIdx = i; + + 
futures.add(executor.submit(() -> { + Random random = new Random(); + + for (int j = 0; j < n; j++) { + long key1 = Math.abs(random.nextLong()); + // Ensure keys are uniques + key1 -= key1 % (threadIdx + 1); + + long key2 = Math.abs(random.nextLong()); + // Ensure keys are uniques + key2 -= key2 % (threadIdx + 1); + + map.put(key1, key2, value, value); + } + })); + } + + for (Future future : futures) { + future.get(); + } + + assertEquals(map.size(), n * nThreads); + + executor.shutdown(); + } + + @Test + public void testIteration() { + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + .build(); + + assertEquals(map.keys(), Collections.emptyList()); + assertEquals(map.values(), Collections.emptyList()); + + map.put(0, 0, 0, 0); + + assertEquals(map.keys(), Lists.newArrayList(new LongPair(0, 0))); + assertEquals(map.values(), Lists.newArrayList(new LongPair(0, 0))); + + map.remove(0, 0); + + assertEquals(map.keys(), Collections.emptyList()); + assertEquals(map.values(), Collections.emptyList()); + + map.put(0, 0, 0, 0); + map.put(1, 1, 11, 11); + map.put(2, 2, 22, 22); + + List keys = map.keys(); + Collections.sort(keys); + assertEquals(keys, Lists.newArrayList(new LongPair(0, 0), new LongPair(1, 1), new LongPair(2, 2))); + + List values = map.values(); + Collections.sort(values); + assertEquals(values, Lists.newArrayList(new LongPair(0, 0), new LongPair(11, 11), new LongPair(22, 22))); + + map.put(1, 1, 111, 111); + + keys = map.keys(); + Collections.sort(keys); + assertEquals(keys, Lists.newArrayList(new LongPair(0, 0), new LongPair(1, 1), new LongPair(2, 2))); + + values = map.values(); + Collections.sort(values); + assertEquals(values, Lists.newArrayList(new LongPair(0, 0), new LongPair(22, 22), new LongPair(111, 111))); + + map.clear(); + assertTrue(map.isEmpty()); + } + + @Test + public void testPutIfAbsent() { + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + .build(); + + 
assertTrue(map.putIfAbsent(1, 1, 11, 11)); + assertEquals(map.get(1, 1), new LongPair(11, 11)); + + assertFalse(map.putIfAbsent(1, 1, 111, 111)); + assertEquals(map.get(1, 1), new LongPair(11, 11)); + } + + @Test + public void testIvalidKeys() { + ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); + + + try { + map.put(-5, 3, 4, 4); + fail("should have failed"); + } catch (IllegalArgumentException e) { + // ok + } + + try { + map.get(-1, 0); + fail("should have failed"); + } catch (IllegalArgumentException e) { + // ok + } + + try { + map.containsKey(-1, 0); + fail("should have failed"); + } catch (IllegalArgumentException e) { + // ok + } + + try { + map.putIfAbsent(-1, 1, 1, 1); + fail("should have failed"); + } catch (IllegalArgumentException e) { + // ok + } + } + + @Test + public void testAsMap() { + ConcurrentLongLongPairHashMap lmap = ConcurrentLongLongPairHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); + lmap.put(1, 1, 11, 11); + lmap.put(2, 2, 22, 22); + lmap.put(3, 3, 33, 33); + + Map map = new HashMap<>(); + map.put(new LongPair(1, 1), new LongPair(11, 11)); + map.put(new LongPair(2, 2), new LongPair(22, 22)); + map.put(new LongPair(3, 3), new LongPair(33, 33)); + + assertEquals(map, lmap.asMap()); + } +} diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java index 82cac712975ed..86030f2161985 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java @@ -45,30 +45,59 @@ public class ConcurrentLongPairSetTest { @Test public void testConstructor() { try { - new ConcurrentLongPairSet(0); + ConcurrentLongPairSet.newBuilder() + 
.expectedItems(0) + .build(); fail("should have thrown exception"); } catch (IllegalArgumentException e) { // ok } try { - new ConcurrentLongPairSet(16, 0); + ConcurrentLongPairSet.newBuilder() + .expectedItems(16) + .concurrencyLevel(0) + .build(); fail("should have thrown exception"); } catch (IllegalArgumentException e) { // ok } try { - new ConcurrentLongPairSet(4, 8); + ConcurrentLongPairSet.newBuilder() + .expectedItems(4) + .concurrencyLevel(8) + .build(); fail("should have thrown exception"); } catch (IllegalArgumentException e) { // ok } } + @Test + public void testReduceUnnecessaryExpansions() { + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .build(); + assertTrue(set.add(1, 1)); + assertTrue(set.add(2, 2)); + assertTrue(set.add(3, 3)); + assertTrue(set.add(4, 4)); + + assertTrue(set.remove(1, 1)); + assertTrue(set.remove(2, 2)); + assertTrue(set.remove(3, 3)); + assertTrue(set.remove(4, 4)); + + assertEquals(0, set.getUsedBucketCount()); + } + @Test public void simpleInsertions() { - ConcurrentLongPairSet set = new ConcurrentLongPairSet(16); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder() + .expectedItems(16) + .build(); assertTrue(set.isEmpty()); assertTrue(set.add(1, 1)); @@ -94,9 +123,64 @@ public void simpleInsertions() { assertEquals(set.size(), 3); } + @Test + public void testClear() { + ConcurrentLongPairSet map = ConcurrentLongPairSet.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertTrue(map.capacity() == 4); + + assertTrue(map.add(1, 1)); + assertTrue(map.add(2, 2)); + assertTrue(map.add(3, 3)); + + assertTrue(map.capacity() == 8); + map.clear(); + assertTrue(map.capacity() == 4); + } + + @Test + public void testExpandAndShrink() { + ConcurrentLongPairSet map = ConcurrentLongPairSet.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + 
assertTrue(map.capacity() == 4); + + assertTrue(map.add(1, 1)); + assertTrue(map.add(2, 2)); + assertTrue(map.add(3, 3)); + + // expand hashmap + assertTrue(map.capacity() == 8); + + assertTrue(map.remove(1, 1)); + // not shrink + assertTrue(map.capacity() == 8); + assertTrue(map.remove(2, 2)); + // shrink hashmap + assertTrue(map.capacity() == 4); + + // expand hashmap + assertTrue(map.add(4, 4)); + assertTrue(map.add(5, 5)); + assertTrue(map.capacity() == 8); + + //verify that the map does not keep shrinking at every remove() operation + assertTrue(map.add(6, 6)); + assertTrue(map.remove(6, 6)); + assertTrue(map.capacity() == 8); + } + + @Test public void testRemove() { - ConcurrentLongPairSet set = new ConcurrentLongPairSet(); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build(); assertTrue(set.isEmpty()); assertTrue(set.add(1, 1)); @@ -111,7 +195,10 @@ public void testRemove() { @Test public void testRehashing() { int n = 16; - ConcurrentLongPairSet set = new ConcurrentLongPairSet(n / 2, 1); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder() + .expectedItems(n / 2) + .concurrencyLevel(1) + .build(); assertEquals(set.capacity(), n); assertEquals(set.size(), 0); @@ -126,7 +213,10 @@ public void testRehashing() { @Test public void testRehashingRemoval() { int n = 16; - ConcurrentLongPairSet set = new ConcurrentLongPairSet(n / 2, 1); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder() + .expectedItems(n / 2) + .concurrencyLevel(1) + .build(); assertEquals(set.capacity(), n); assertEquals(set.size(), 0); @@ -152,7 +242,10 @@ public void testRehashingRemoval() { @Test public void testRehashingWithDeletes() { int n = 16; - ConcurrentLongPairSet set = new ConcurrentLongPairSet(n / 2, 1); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder() + .expectedItems(n / 2) + .concurrencyLevel(1) + .build(); assertEquals(set.capacity(), n); assertEquals(set.size(), 0); @@ -177,7 +270,7 @@ public void 
testRehashingWithDeletes() { @Test public void concurrentInsertions() throws Throwable { - ConcurrentLongPairSet set = new ConcurrentLongPairSet(); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build(); @Cleanup("shutdownNow") ExecutorService executor = Executors.newCachedThreadPool(); @@ -210,7 +303,7 @@ public void concurrentInsertions() throws Throwable { @Test public void concurrentInsertionsAndReads() throws Throwable { - ConcurrentLongPairSet map = new ConcurrentLongPairSet(); + ConcurrentLongPairSet map = ConcurrentLongPairSet.newBuilder().build(); @Cleanup("shutdownNow") ExecutorService executor = Executors.newCachedThreadPool(); @@ -243,7 +336,7 @@ public void concurrentInsertionsAndReads() throws Throwable { @Test public void testIteration() { - ConcurrentLongPairSet set = new ConcurrentLongPairSet(); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build(); assertEquals(set.items(), Collections.emptyList()); @@ -269,7 +362,7 @@ public void testIteration() { @Test public void testRemoval() { - ConcurrentLongPairSet set = new ConcurrentLongPairSet(); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build(); set.add(0, 0); set.add(1, 1); @@ -295,7 +388,7 @@ public void testRemoval() { @Test public void testIfRemoval() { - ConcurrentLongPairSet set = new ConcurrentLongPairSet(); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build(); set.add(0, 0); set.add(1, 1); @@ -319,7 +412,7 @@ public void testIfRemoval() { @Test public void testItems() { - ConcurrentLongPairSet set = new ConcurrentLongPairSet(); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build(); int n = 100; int limit = 10; @@ -340,7 +433,10 @@ public void testItems() { @Test public void testHashConflictWithDeletion() { final int Buckets = 16; - ConcurrentLongPairSet set = new ConcurrentLongPairSet(Buckets, 1); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder() + .expectedItems(Buckets) + 
.concurrencyLevel(1) + .build(); // Pick 2 keys that fall into the same bucket long key1 = 1; @@ -375,7 +471,7 @@ public void testHashConflictWithDeletion() { @Test public void testEqualsObjects() { - ConcurrentLongPairSet set = new ConcurrentLongPairSet(); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build(); long t1 = 1; long t2 = 2; @@ -397,7 +493,7 @@ public void testEqualsObjects() { @Test public void testToString() { - ConcurrentLongPairSet set = new ConcurrentLongPairSet(); + ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build(); set.add(0, 0); set.add(1, 1); diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java index e18012cdf13f2..cec52ea3ded64 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java @@ -22,6 +22,7 @@ import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertThrows; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; @@ -48,21 +49,29 @@ public class ConcurrentOpenHashMapTest { @Test public void testConstructor() { try { - new ConcurrentOpenHashMap(0); + ConcurrentOpenHashMap.newBuilder() + .expectedItems(0) + .build(); fail("should have thrown exception"); } catch (IllegalArgumentException e) { // ok } try { - new ConcurrentOpenHashMap(16, 0); + ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(0) + .build(); fail("should have thrown exception"); } catch (IllegalArgumentException e) { // ok } try { - new ConcurrentOpenHashMap(4, 8); + ConcurrentOpenHashMap.newBuilder() + .expectedItems(4) + .concurrencyLevel(8) 
+ .build(); fail("should have thrown exception"); } catch (IllegalArgumentException e) { // ok @@ -71,7 +80,10 @@ public void testConstructor() { @Test public void simpleInsertions() { - ConcurrentOpenHashMap map = new ConcurrentOpenHashMap<>(16); + ConcurrentOpenHashMap map = + ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .build(); assertTrue(map.isEmpty()); assertNull(map.put("1", "one")); @@ -97,9 +109,83 @@ public void simpleInsertions() { assertEquals(map.size(), 3); } + @Test + public void testReduceUnnecessaryExpansions() { + ConcurrentOpenHashMap map = ConcurrentOpenHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .build(); + assertNull(map.put("1", "1")); + assertNull(map.put("2", "2")); + assertNull(map.put("3", "3")); + assertNull(map.put("4", "4")); + + assertEquals(map.remove("1"), "1"); + assertEquals(map.remove("2"), "2"); + assertEquals(map.remove("3"), "3"); + assertEquals(map.remove("4"), "4"); + + assertEquals(0, map.getUsedBucketCount()); + } + + @Test + public void testClear() { + ConcurrentOpenHashMap map = ConcurrentOpenHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertTrue(map.capacity() == 4); + + assertNull(map.put("k1", "v1")); + assertNull(map.put("k2", "v2")); + assertNull(map.put("k3", "v3")); + + assertTrue(map.capacity() == 8); + map.clear(); + assertTrue(map.capacity() == 4); + } + + @Test + public void testExpandAndShrink() { + ConcurrentOpenHashMap map = ConcurrentOpenHashMap.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertTrue(map.capacity() == 4); + + assertNull(map.put("k1", "v1")); + assertNull(map.put("k2", "v2")); + assertNull(map.put("k3", "v3")); + + // expand hashmap + assertTrue(map.capacity() == 8); + + assertTrue(map.remove("k1", "v1")); + // not shrink + assertTrue(map.capacity() == 8); + assertTrue(map.remove("k2", "v2")); + // shrink 
hashmap + assertTrue(map.capacity() == 4); + + // expand hashmap + assertNull(map.put("k4", "v4")); + assertNull(map.put("k5", "v5")); + assertTrue(map.capacity() == 8); + + //verify that the map does not keep shrinking at every remove() operation + assertNull(map.put("k6", "v6")); + assertTrue(map.remove("k6", "v6")); + assertTrue(map.capacity() == 8); + } + @Test public void testRemove() { - ConcurrentOpenHashMap map = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap map = + ConcurrentOpenHashMap.newBuilder().build(); assertTrue(map.isEmpty()); assertNull(map.put("1", "one")); @@ -116,7 +202,10 @@ public void testRemove() { @Test public void testRehashing() { int n = 16; - ConcurrentOpenHashMap map = new ConcurrentOpenHashMap<>(n / 2, 1); + ConcurrentOpenHashMap map = ConcurrentOpenHashMap.newBuilder() + .expectedItems(n / 2) + .concurrencyLevel(1) + .build(); assertEquals(map.capacity(), n); assertEquals(map.size(), 0); @@ -131,7 +220,11 @@ public void testRehashing() { @Test public void testRehashingWithDeletes() { int n = 16; - ConcurrentOpenHashMap map = new ConcurrentOpenHashMap<>(n / 2, 1); + ConcurrentOpenHashMap map = + ConcurrentOpenHashMap.newBuilder() + .expectedItems(n / 2) + .concurrencyLevel(1) + .build(); assertEquals(map.capacity(), n); assertEquals(map.size(), 0); @@ -153,7 +246,10 @@ public void testRehashingWithDeletes() { @Test public void concurrentInsertions() throws Throwable { - ConcurrentOpenHashMap map = new ConcurrentOpenHashMap<>(16, 1); + ConcurrentOpenHashMap map = ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); @Cleanup("shutdownNow") ExecutorService executor = Executors.newCachedThreadPool(); @@ -187,7 +283,8 @@ public void concurrentInsertions() throws Throwable { @Test public void concurrentInsertionsAndReads() throws Throwable { - ConcurrentOpenHashMap map = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap map = + ConcurrentOpenHashMap.newBuilder().build(); 
@Cleanup("shutdownNow") ExecutorService executor = Executors.newCachedThreadPool(); @@ -221,7 +318,8 @@ public void concurrentInsertionsAndReads() throws Throwable { @Test public void testIteration() { - ConcurrentOpenHashMap map = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap map = + ConcurrentOpenHashMap.newBuilder().build(); assertEquals(map.keys(), Collections.emptyList()); assertEquals(map.values(), Collections.emptyList()); @@ -265,7 +363,10 @@ public void testIteration() { @Test public void testHashConflictWithDeletion() { final int Buckets = 16; - ConcurrentOpenHashMap map = new ConcurrentOpenHashMap<>(Buckets, 1); + ConcurrentOpenHashMap map = ConcurrentOpenHashMap.newBuilder() + .expectedItems(Buckets) + .concurrencyLevel(1) + .build(); // Pick 2 keys that fall into the same bucket long key1 = 1; @@ -298,7 +399,8 @@ public void testHashConflictWithDeletion() { @Test public void testPutIfAbsent() { - ConcurrentOpenHashMap map = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap map = + ConcurrentOpenHashMap.newBuilder().build(); assertNull(map.putIfAbsent(1l, "one")); assertEquals(map.get(1l), "one"); @@ -308,7 +410,10 @@ public void testPutIfAbsent() { @Test public void testComputeIfAbsent() { - ConcurrentOpenHashMap map = new ConcurrentOpenHashMap<>(16, 1); + ConcurrentOpenHashMap map = ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + .build(); AtomicInteger counter = new AtomicInteger(); Function provider = key -> counter.getAndIncrement(); @@ -349,7 +454,8 @@ public boolean equals(Object obj) { } } - ConcurrentOpenHashMap map = new ConcurrentOpenHashMap<>(); + ConcurrentOpenHashMap map = + ConcurrentOpenHashMap.newBuilder().build(); T t1 = new T(1); T t1_b = new T(1); @@ -369,12 +475,50 @@ public boolean equals(Object obj) { assertNull(map.get(t1_b)); } + @Test + public void testNullValue() { + ConcurrentOpenHashMap map = + ConcurrentOpenHashMap.newBuilder() + .expectedItems(16) + .concurrencyLevel(1) + 
.build(); + String key = "a"; + assertThrows(NullPointerException.class, () -> map.put(key, null)); + + //put a null value. + assertNull(map.computeIfAbsent(key, k -> null)); + assertEquals(1, map.size()); + assertEquals(1, map.keys().size()); + assertEquals(1, map.values().size()); + assertNull(map.get(key)); + assertFalse(map.containsKey(key)); + + //test remove null value + map.removeNullValue(key); + assertTrue(map.isEmpty()); + assertEquals(0, map.keys().size()); + assertEquals(0, map.values().size()); + assertNull(map.get(key)); + assertFalse(map.containsKey(key)); + + + //test not remove non-null value + map.put(key, "V"); + assertEquals(1, map.size()); + map.removeNullValue(key); + assertEquals(1, map.size()); + + } + static final int Iterations = 1; static final int ReadIterations = 1000; static final int N = 1_000_000; public void benchConcurrentOpenHashMap() throws Exception { - ConcurrentOpenHashMap map = new ConcurrentOpenHashMap<>(N, 1); + ConcurrentOpenHashMap map = ConcurrentOpenHashMap.newBuilder() + .expectedItems(N) + .concurrencyLevel(1) + .build(); for (long i = 0; i < Iterations; i++) { for (int j = 0; j < N; j++) { diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java index 3c1d99668d733..6c82293bec29a 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java @@ -91,9 +91,86 @@ public void simpleInsertions() { assertEquals(set.size(), 3); } + @Test + public void testReduceUnnecessaryExpansions() { + ConcurrentOpenHashSet set = + ConcurrentOpenHashSet.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .build(); + + assertTrue(set.add("1")); + assertTrue(set.add("2")); + assertTrue(set.add("3")); + 
assertTrue(set.add("4")); + + assertTrue(set.remove("1")); + assertTrue(set.remove("2")); + assertTrue(set.remove("3")); + assertTrue(set.remove("4")); + assertEquals(0, set.getUsedBucketCount()); + } + + @Test + public void testClear() { + ConcurrentOpenHashSet set = + ConcurrentOpenHashSet.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertTrue(set.capacity() == 4); + + assertTrue(set.add("k1")); + assertTrue(set.add("k2")); + assertTrue(set.add("k3")); + + assertTrue(set.capacity() == 8); + set.clear(); + assertTrue(set.capacity() == 4); + } + + @Test + public void testExpandAndShrink() { + ConcurrentOpenHashSet map = + ConcurrentOpenHashSet.newBuilder() + .expectedItems(2) + .concurrencyLevel(1) + .autoShrink(true) + .mapIdleFactor(0.25f) + .build(); + assertTrue(map.capacity() == 4); + + assertTrue(map.add("k1")); + assertTrue(map.add("k2")); + assertTrue(map.add("k3")); + + // expand hashmap + assertTrue(map.capacity() == 8); + + assertTrue(map.remove("k1")); + // not shrink + assertTrue(map.capacity() == 8); + assertTrue(map.remove("k2")); + // shrink hashmap + assertTrue(map.capacity() == 4); + + // expand hashmap + assertTrue(map.add("k4")); + assertTrue(map.add("k5")); + assertTrue(map.capacity() == 8); + + //verify that the map does not keep shrinking at every remove() operation + assertTrue(map.add("k6")); + assertTrue(map.remove("k6")); + assertTrue(map.capacity() == 8); + } + @Test public void testRemove() { - ConcurrentOpenHashSet set = new ConcurrentOpenHashSet<>(); + ConcurrentOpenHashSet set = + ConcurrentOpenHashSet.newBuilder().build(); assertTrue(set.isEmpty()); assertTrue(set.add("1")); @@ -145,7 +222,8 @@ public void testRehashingWithDeletes() { @Test public void concurrentInsertions() throws Throwable { - ConcurrentOpenHashSet set = new ConcurrentOpenHashSet<>(); + ConcurrentOpenHashSet set = + ConcurrentOpenHashSet.newBuilder().build(); @Cleanup("shutdownNow") 
ExecutorService executor = Executors.newCachedThreadPool(); @@ -178,7 +256,8 @@ public void concurrentInsertions() throws Throwable { @Test public void concurrentInsertionsAndReads() throws Throwable { - ConcurrentOpenHashSet map = new ConcurrentOpenHashSet<>(); + ConcurrentOpenHashSet map = + ConcurrentOpenHashSet.newBuilder().build(); @Cleanup("shutdownNow") ExecutorService executor = Executors.newCachedThreadPool(); @@ -211,7 +290,7 @@ public void concurrentInsertionsAndReads() throws Throwable { @Test public void testIteration() { - ConcurrentOpenHashSet set = new ConcurrentOpenHashSet<>(); + ConcurrentOpenHashSet set = ConcurrentOpenHashSet.newBuilder().build(); assertEquals(set.values(), Collections.emptyList()); @@ -237,7 +316,8 @@ public void testIteration() { @Test public void testRemoval() { - ConcurrentOpenHashSet set = new ConcurrentOpenHashSet<>(); + ConcurrentOpenHashSet set = + ConcurrentOpenHashSet.newBuilder().build(); set.add(0); set.add(1); @@ -315,7 +395,8 @@ public boolean equals(Object obj) { } } - ConcurrentOpenHashSet set = new ConcurrentOpenHashSet<>(); + ConcurrentOpenHashSet set = + ConcurrentOpenHashSet.newBuilder().build(); T t1 = new T(1); T t1_b = new T(1); diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenLongPairRangeSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenLongPairRangeSetTest.java index f57b75d52e10c..ba29ee0d27f99 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenLongPairRangeSetTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenLongPairRangeSetTest.java @@ -19,7 +19,9 @@ package org.apache.pulsar.common.util.collections; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; import java.util.List; import 
java.util.Set; @@ -37,6 +39,17 @@ public class ConcurrentOpenLongPairRangeSetTest { static final LongPairConsumer consumer = (key, value) -> new LongPair(key, value); + @Test + public void testIsEmpty() { + ConcurrentOpenLongPairRangeSet set = new ConcurrentOpenLongPairRangeSet<>(consumer); + assertTrue(set.isEmpty()); + // lowerValueOpen and upperValue are both -1 so that an empty set will be added + set.addOpenClosed(0, -1, 0, -1); + assertTrue(set.isEmpty()); + set.addOpenClosed(1, 1, 1, 5); + assertFalse(set.isEmpty()); + } + @Test public void testAddForSameKey() { ConcurrentOpenLongPairRangeSet set = new ConcurrentOpenLongPairRangeSet<>(consumer); @@ -447,4 +460,24 @@ private List> getConnectedRange(Set> gRanges) { gRangeConnected.add(lastRange); return gRangeConnected; } + + @Test + public void testCardinality() { + ConcurrentOpenLongPairRangeSet set = new ConcurrentOpenLongPairRangeSet<>(consumer); + int v = set.cardinality(0, 0, Integer.MAX_VALUE, Integer.MAX_VALUE); + assertEquals(v, 0 ); + set.addOpenClosed(1, 0, 1, 20); + set.addOpenClosed(1, 30, 1, 90); + set.addOpenClosed(2, 0, 3, 30); + v = set.cardinality(1, 0, 1, 100); + assertEquals(v, 80); + v = set.cardinality(1, 11, 1, 100); + assertEquals(v, 70); + v = set.cardinality(1, 0, 1, 90); + assertEquals(v, 80); + v = set.cardinality(1, 0, 1, 80); + assertEquals(v, 70); + v = set.cardinality(1, 0, 3, 30); + assertEquals(v, 80 + 31); + } } diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSetTest.java index 821bb8819554b..62dfa21dc81c9 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSetTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSetTest.java @@ -22,7 +22,8 @@ import static org.testng.Assert.assertFalse; import static 
org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertTrue; - +import com.google.common.collect.ComparisonChain; +import com.google.common.collect.Lists; import java.util.ArrayList; import java.util.List; import java.util.Random; @@ -30,13 +31,10 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; - import lombok.Cleanup; import org.apache.pulsar.common.util.collections.ConcurrentLongPairSet.LongPair; import org.testng.annotations.Test; -import com.google.common.collect.Lists; - public class ConcurrentSortedLongPairSetTest { @Test @@ -184,6 +182,20 @@ public void testIfRemoval() { values = new ArrayList<>(set.items()); values.sort(null); assertEquals(values, Lists.newArrayList(new LongPair(6, 6), new LongPair(7, 7))); + + set = new ConcurrentSortedLongPairSet(128, 2, true); + set.add(2, 2); + set.add(1, 3); + set.add(3, 1); + set.add(2, 1); + set.add(3, 2); + set.add(1, 2); + set.add(1, 1); + removeItems = set.removeIf((ledgerId, entryId) -> { + return ComparisonChain.start().compare(ledgerId, 1).compare(entryId, 3) + .result() <= 0; + }); + assertEquals(removeItems, 3); } @Test @@ -241,4 +253,39 @@ public void testToString() { assertEquals(set.toString(), toString); } + @Test + public void testIsEmpty() { + LongPairSet set = new ConcurrentSortedLongPairSet(); + assertTrue(set.isEmpty()); + set.add(1, 1); + assertFalse(set.isEmpty()); + } + + @Test + public void testShrink() { + LongPairSet set = new ConcurrentSortedLongPairSet(2, 1, true); + set.add(0, 0); + assertTrue(set.capacity() == 4); + set.add(0, 1); + assertTrue(set.capacity() == 4); + set.add(1, 1); + assertTrue(set.capacity() == 8); + set.add(1, 2); + assertTrue(set.capacity() == 8); + set.add(1, 3); + set.add(1, 4); + set.add(1, 5); + assertTrue(set.capacity() == 12); + set.remove(1, 5); + // not shrink + assertTrue(set.capacity() == 12); + set.remove(1, 4); + // the internal map does not keep shrinking at every 
remove() operation + assertTrue(set.capacity() == 12); + set.remove(1, 3); + set.remove(1, 2); + set.remove(1, 1); + // shrink + assertTrue(set.capacity() == 8); + } } diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/FieldParserTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/FieldParserTest.java index c8a46cb0a1585..12d579bd8cd77 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/FieldParserTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/FieldParserTest.java @@ -32,6 +32,7 @@ import static org.testng.Assert.fail; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -54,6 +55,10 @@ public void testConversion() { assertEquals(integerToString(1), String.valueOf(1)); assertEquals(stringToList("1,2,3", Integer.class).get(2), Integer.valueOf(3)); assertTrue(stringToSet("1,2,3", Integer.class).contains(3)); + // the order of values should be preserved for a Set configuration item + assertEquals(new ArrayList<>(stringToSet("1,2,3", Integer.class)), Arrays.asList(1, 2, 3)); + assertEquals(new ArrayList<>(stringToSet("2,3,1", Integer.class)), Arrays.asList(2, 3, 1)); + assertEquals(new ArrayList<>(stringToSet("3,2,1", Integer.class)), Arrays.asList(3, 2, 1)); assertEquals(stringToBoolean("true"), Boolean.TRUE); assertEquals(stringToDouble("2.2"), Double.valueOf(2.2)); assertEquals(stringToLong("2"), Long.valueOf(2)); diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/SegmentedLongArrayTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/SegmentedLongArrayTest.java new file mode 100644 index 0000000000000..efb86fd4f9d48 --- /dev/null +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/SegmentedLongArrayTest.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation 
(ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.common.util.collections; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.fail; +import lombok.Cleanup; +import org.testng.annotations.Test; + +public class SegmentedLongArrayTest { + + @Test + public void testArray() { + @Cleanup + SegmentedLongArray a = new SegmentedLongArray(4); + assertEquals(a.getCapacity(), 4); + assertEquals(a.bytesCapacity(), 4 * 8); + assertEquals(a.getInitialCapacity(), 4); + + a.writeLong(0, 0); + a.writeLong(1, 1); + a.writeLong(2, 2); + a.writeLong(3, Long.MAX_VALUE); + + try { + a.writeLong(4, Long.MIN_VALUE); + fail("should have failed"); + } catch (IndexOutOfBoundsException e) { + // Expected + } + + a.increaseCapacity(); + + a.writeLong(4, Long.MIN_VALUE); + + assertEquals(a.getCapacity(), 8); + assertEquals(a.bytesCapacity(), 8 * 8); + assertEquals(a.getInitialCapacity(), 4); + + assertEquals(a.readLong(0), 0); + assertEquals(a.readLong(1), 1); + assertEquals(a.readLong(2), 2); + assertEquals(a.readLong(3), Long.MAX_VALUE); + assertEquals(a.readLong(4), Long.MIN_VALUE); + + a.shrink(5); + assertEquals(a.getCapacity(), 5); + assertEquals(a.bytesCapacity(), 5 * 8); + assertEquals(a.getInitialCapacity(), 4); + } + + 
@Test + public void testLargeArray() { + long initialCap = 3 * 1024 * 1024; + + @Cleanup + SegmentedLongArray a = new SegmentedLongArray(initialCap); + assertEquals(a.getCapacity(), initialCap); + assertEquals(a.bytesCapacity(), initialCap * 8); + assertEquals(a.getInitialCapacity(), initialCap); + + long baseOffset = initialCap - 100; + + a.writeLong(baseOffset, 0); + a.writeLong(baseOffset + 1, 1); + a.writeLong(baseOffset + 2, 2); + a.writeLong(baseOffset + 3, Long.MAX_VALUE); + a.writeLong(baseOffset + 4, Long.MIN_VALUE); + + a.increaseCapacity(); + + assertEquals(a.getCapacity(), 5 * 1024 * 1024); + assertEquals(a.bytesCapacity(), 5 * 1024 * 1024 * 8); + assertEquals(a.getInitialCapacity(), initialCap); + + assertEquals(a.readLong(baseOffset), 0); + assertEquals(a.readLong(baseOffset + 1), 1); + assertEquals(a.readLong(baseOffset + 2), 2); + assertEquals(a.readLong(baseOffset + 3), Long.MAX_VALUE); + assertEquals(a.readLong(baseOffset + 4), Long.MIN_VALUE); + + a.shrink(initialCap); + assertEquals(a.getCapacity(), initialCap); + assertEquals(a.bytesCapacity(), initialCap * 8); + assertEquals(a.getInitialCapacity(), initialCap); + } +} diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/TripleLongPriorityQueueTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/TripleLongPriorityQueueTest.java index 4cb1027e0a9ff..de3b1adb7929d 100644 --- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/TripleLongPriorityQueueTest.java +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/TripleLongPriorityQueueTest.java @@ -54,6 +54,35 @@ public void testQueue() { pq.close(); } + @Test + public void testLargeQueue() { + TripleLongPriorityQueue pq = new TripleLongPriorityQueue(); + assertEquals(pq.size(), 0); + + final int N = 3_000_000; + + for (int i = N; i > 0; i--) { + pq.add(i, i * 2L, i * 3L); + } + + assertEquals(pq.size(), N); + assertFalse(pq.isEmpty()); + + for 
(int i = 1; i <= N; i++) { + assertEquals(pq.peekN1(), i); + assertEquals(pq.peekN2(), i * 2); + assertEquals(pq.peekN3(), i * 3); + + pq.pop(); + + assertEquals(pq.size(), N - i); + } + + pq.clear(); + pq.close(); + } + + @Test public void testCheckForEmpty() { TripleLongPriorityQueue pq = new TripleLongPriorityQueue(); @@ -135,4 +164,37 @@ public void testCompareWithSamePrefix() { pq.close(); } + + @Test + public void testShrink() throws Exception { + int initialCapacity = 20; + int tupleSize = 3 * 8; + TripleLongPriorityQueue pq = new TripleLongPriorityQueue(initialCapacity, 0.5f); + pq.add(0, 0, 0); + assertEquals(pq.size(), 1); + assertEquals(pq.bytesCapacity(), initialCapacity * tupleSize); + + // Scale out to capacity * 2 + triggerScaleOut(initialCapacity, pq); + int scaleCapacity = initialCapacity * 2; + assertEquals(pq.bytesCapacity(), scaleCapacity * tupleSize); + // Trigger shrinking + for (int i = 0; i < initialCapacity / 2 + 2; i++) { + pq.pop(); + } + int capacity = scaleCapacity - (int)((scaleCapacity ) * 0.5f * 0.9f); + assertTrue(pq.bytesCapacity() < scaleCapacity * tupleSize); + // Scale out to capacity * 2 + triggerScaleOut(initialCapacity, pq); + scaleCapacity = capacity * 2; + // Trigger shrinking + pq.clear(); + capacity = scaleCapacity - (int)(scaleCapacity * 0.5f * 0.9f); + } + + private void triggerScaleOut(int initialCapacity, TripleLongPriorityQueue pq) { + for (long i = 0; i < initialCapacity + 1; i++) { + pq.add(i, i, i); + } + } } diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/netty/DnsResolverTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/netty/DnsResolverTest.java new file mode 100644 index 0000000000000..0ccb960e79887 --- /dev/null +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/netty/DnsResolverTest.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.common.util.netty; + +import io.netty.channel.EventLoop; +import io.netty.resolver.dns.DnsNameResolverBuilder; +import org.mockito.Mockito; +import org.testng.Assert; +import org.testng.annotations.Test; + +public class DnsResolverTest { + + @Test + public void testMaxTtl() { + EventLoop eventLoop = Mockito.mock(EventLoop.class); + DnsNameResolverBuilder dnsNameResolverBuilder = new DnsNameResolverBuilder(eventLoop); + DnsResolverUtil.applyJdkDnsCacheSettings(dnsNameResolverBuilder); + // If the maxTtl is <=0, it will throw IllegalArgumentException. + try { + dnsNameResolverBuilder.build(); + } catch (Exception ex) { + Assert.assertFalse(ex instanceof IllegalArgumentException); + } + } +} diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/netty/SslContextTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/netty/SslContextTest.java new file mode 100644 index 0000000000000..0fbd2521ae08d --- /dev/null +++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/netty/SslContextTest.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.pulsar.common.util.netty; + +import static org.testng.Assert.assertThrows; +import com.google.common.io.Resources; +import io.netty.handler.ssl.SslProvider; +import java.io.IOException; +import java.security.GeneralSecurityException; +import java.util.HashSet; +import java.util.Set; +import javax.net.ssl.SSLException; +import org.apache.pulsar.client.api.AuthenticationDataProvider; +import org.apache.pulsar.client.api.KeyStoreParams; +import org.apache.pulsar.common.util.NettyClientSslContextRefresher; +import org.apache.pulsar.common.util.NettyServerSslContextBuilder; +import org.apache.pulsar.common.util.keystoretls.NettySSLContextAutoRefreshBuilder; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +public class SslContextTest { + @DataProvider(name = "caCertSslContextDataProvider") + public static Object[][] getSslContextDataProvider() { + Set ciphers = new HashSet<>(); + ciphers.add("TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"); + ciphers.add("TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"); + ciphers.add("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"); + ciphers.add("TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"); + ciphers.add("TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"); + + // Note: OPENSSL doesn't support these ciphers. 
+ return new Object[][]{ + new Object[]{SslProvider.JDK, ciphers}, + new Object[]{SslProvider.JDK, null}, + + new Object[]{SslProvider.OPENSSL, ciphers}, + new Object[]{SslProvider.OPENSSL, null}, + + new Object[]{null, ciphers}, + new Object[]{null, null}, + }; + } + + @DataProvider(name = "cipherDataProvider") + public static Object[] getCipher() { + Set cipher = new HashSet<>(); + cipher.add("TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"); + cipher.add("TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"); + cipher.add("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"); + cipher.add("TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"); + cipher.add("TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"); + + return new Object[]{null, cipher}; + } + + @Test(dataProvider = "cipherDataProvider") + public void testServerKeyStoreSSLContext(Set cipher) throws Exception { + NettySSLContextAutoRefreshBuilder contextAutoRefreshBuilder = new NettySSLContextAutoRefreshBuilder(null, + "JKS", Resources.getResource("ssl/jetty_server_key.jks").getPath(), + "jetty_server_pwd", false, "JKS", + Resources.getResource("ssl/jetty_server_trust.jks").getPath(), + "jetty_server_pwd", true, cipher, + null, 600); + contextAutoRefreshBuilder.update(); + } + + private static class ClientAuthenticationData implements AuthenticationDataProvider { + @Override + public KeyStoreParams getTlsKeyStoreParams() { + return null; + } + } + + @Test(dataProvider = "cipherDataProvider") + public void testClientKeyStoreSSLContext(Set cipher) throws Exception { + NettySSLContextAutoRefreshBuilder contextAutoRefreshBuilder = new NettySSLContextAutoRefreshBuilder(null, + false, "JKS", Resources.getResource("ssl/jetty_server_trust.jks").getPath(), + "jetty_server_pwd", cipher, null, 0, new ClientAuthenticationData()); + contextAutoRefreshBuilder.update(); + } + + @Test(dataProvider = "caCertSslContextDataProvider") + public void testServerCaCertSslContextWithSslProvider(SslProvider sslProvider, Set ciphers) + throws GeneralSecurityException, IOException { + 
NettyServerSslContextBuilder sslContext = new NettyServerSslContextBuilder(sslProvider, + true, Resources.getResource("ssl/my-ca/ca.pem").getPath(), + Resources.getResource("ssl/my-ca/server-ca.pem").getPath(), + Resources.getResource("ssl/my-ca/server-key.pem").getPath(), + ciphers, + null, + true, 60); + if (ciphers != null) { + if (sslProvider == null || sslProvider == SslProvider.OPENSSL) { + assertThrows(SSLException.class, sslContext::update); + return; + } + } + sslContext.update(); + } + + @Test(dataProvider = "caCertSslContextDataProvider") + public void testClientCaCertSslContextWithSslProvider(SslProvider sslProvider, Set ciphers) + throws GeneralSecurityException, IOException { + NettyClientSslContextRefresher sslContext = new NettyClientSslContextRefresher(sslProvider, + true, Resources.getResource("ssl/my-ca/ca.pem").getPath(), + null, ciphers, null, 0); + if (ciphers != null) { + if (sslProvider == null || sslProvider == SslProvider.OPENSSL) { + assertThrows(SSLException.class, sslContext::update); + return; + } + } + sslContext.update(); + } +} diff --git a/pulsar-common/src/test/java/org/apache/pulsar/policies/data/loadbalancer/LocalBrokerDataTest.java b/pulsar-common/src/test/java/org/apache/pulsar/policies/data/loadbalancer/LocalBrokerDataTest.java new file mode 100644 index 0000000000000..69d4a7f4cd198 --- /dev/null +++ b/pulsar-common/src/test/java/org/apache/pulsar/policies/data/loadbalancer/LocalBrokerDataTest.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.policies.data.loadbalancer; + +import com.google.gson.Gson; +import org.testng.Assert; +import org.testng.annotations.Test; + +import static org.testng.Assert.assertEquals; + +public class LocalBrokerDataTest { + + @Test + public void testLocalBrokerDataDeserialization() { + String data = "{\"webServiceUrl\":\"http://10.244.2.23:8080\",\"webServiceUrlTls\":\"https://10.244.2.23:8081\",\"pulsarServiceUrlTls\":\"pulsar+ssl://10.244.2.23:6651\",\"persistentTopicsEnabled\":true,\"nonPersistentTopicsEnabled\":false,\"cpu\":{\"usage\":3.1577712104798255,\"limit\":100.0},\"memory\":{\"usage\":614.0,\"limit\":1228.0},\"directMemory\":{\"usage\":32.0,\"limit\":1228.0},\"bandwidthIn\":{\"usage\":0.0,\"limit\":0.0},\"bandwidthOut\":{\"usage\":0.0,\"limit\":0.0},\"msgThroughputIn\":0.0,\"msgThroughputOut\":0.0,\"msgRateIn\":0.0,\"msgRateOut\":0.0,\"lastUpdate\":1650886425227,\"lastStats\":{\"pulsar/pulsar/10.244.2.23:8080/0x00000000_0xffffffff\":{\"msgRateIn\":0.0,\"msgThroughputIn\":0.0,\"msgRateOut\":0.0,\"msgThroughputOut\":0.0,\"consumerCount\":0,\"producerCount\":0,\"topics\":1,\"cacheSize\":0}},\"numTopics\":1,\"numBundles\":1,\"numConsumers\":0,\"numProducers\":0,\"bundles\":[\"pulsar/pulsar/10.244.2.23:8080/0x00000000_0xffffffff\"],\"lastBundleGains\":[],\"lastBundleLosses\":[],\"brokerVersionString\":\"2.11.0-hw-0.0.4-SNAPSHOT\",\"protocols\":{},\"advertisedListeners\":{},\"bundleStats\":{\"pulsar/pulsar/10.244.2.23:8080/0x00000000_0xffffffff\":{\"msgRateIn\":0.0,\"msgThroughputIn\":0.0,\"msgRateOut\":0.0,\"msgThroughput
Out\":0.0,\"consumerCount\":0,\"producerCount\":0,\"topics\":1,\"cacheSize\":0}},\"maxResourceUsage\":0.49645519256591797,\"loadReportType\":\"LocalBrokerData\"}"; + Gson gson = new Gson(); + LocalBrokerData localBrokerData = gson.fromJson(data, LocalBrokerData.class); + Assert.assertEquals(localBrokerData.getMemory().limit, 1228.0d, 0.0001f); + Assert.assertEquals(localBrokerData.getMemory().usage, 614.0d, 0.0001f); + Assert.assertEquals(localBrokerData.getMemory().percentUsage(), ((float) localBrokerData.getMemory().usage) / ((float) localBrokerData.getMemory().limit) * 100, 0.0001f); + } + + @Test + public void testMaxResourceUsage() { + LocalBrokerData data = new LocalBrokerData(); + data.setCpu(new ResourceUsage(1.0, 100.0)); + data.setMemory(new ResourceUsage(800.0, 200.0)); + data.setDirectMemory(new ResourceUsage(2.0, 100.0)); + data.setBandwidthIn(new ResourceUsage(3.0, 100.0)); + data.setBandwidthOut(new ResourceUsage(4.0, 100.0)); + + double epsilon = 0.00001; + double weight = 0.5; + // skips memory usage + assertEquals(data.getMaxResourceUsage(), 0.04, epsilon); + + assertEquals( + data.getMaxResourceUsageWithWeight( + weight, weight, weight, weight, weight), 2.0, epsilon); + + assertEquals( + data.getMaxResourceUsageWithWeightWithinLimit( + weight, weight, weight, weight, weight), 0.02, epsilon); + + } +} diff --git a/pulsar-common/src/test/resources/ca/multiple-ca.pem b/pulsar-common/src/test/resources/ca/multiple-ca.pem new file mode 100644 index 0000000000000..15f136a695391 --- /dev/null +++ b/pulsar-common/src/test/resources/ca/multiple-ca.pem @@ -0,0 +1,36 @@ +-----BEGIN CERTIFICATE----- +MIIC8jCCAdqgAwIBAgIUPnoDe05/dkrbpa2vpmnp45e6/4UwDQYJKoZIhvcNAQEL +BQAwEDEOMAwGA1UEAxMFdGVzdDEwIBcNMjIwMzIyMDkxOTAwWhgPMjEyMjAzMjQw +OTE5MDBaMBAxDjAMBgNVBAMTBXRlc3QxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAvbd4hZAwrgShqrA6g7QKyXQw/4TvskSBT411XCu9+ZIvG4tEafUJ +CjuHv6AEAt8XN9DSNzVB64q/cOczskaOMa/MQd6Qe+peAiqUsFyu6vucQyCWOLz6 
+iuvjPyhuIL5ZYbh17CtXNZOn50BYzq95K4vcAvNUxq/HAGnAm2HegSujq4IMaVpU +gBE3OinUf6patbGqDDuPRUy/gw3I/+xkQcP9RxZqmbsvc6tw6ZpejBdCunCF9hxH +p1V70AqNlxUo7H2w7O7gSDU17gzq8kYoyyiJSLS4Wh1nDscpCQykcxYtS+Agb4VZ +GOYxWMyIBvhRHXLfPVSaYRKw5t5cVy4GjQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQULlE62PqE/wu4x4BmUmnE5Xfw +8rowDQYJKoZIhvcNAQELBQADggEBAEVd7o4Gdm9jpsIFuq8879q3XTmdVvLFL5/6 +g/AddloVeyznd6J1vP4n/4fcIskJ084SD1g8FXG21hOb4vQR06E1qWYhxgJJs7fz +kY3nInbmEWba4Sg7dHXL1KnKOCkhq25UlFF2sMI5BSyKwwAi1R7PKdbTFXZuwFL+ +bKJIvegh+jawlFi1LbSjYYRTy4GgSE8f8/T0xVjqNjdxBnExEkV/dklUJgxck1b1 +K7fRAeoB65tpO/jvYeoQGu1kJkUNmbJu8k1TbBJQm9AxR6OaE+ZKFCf3U5wqH3Ff +hvrO1utY5yvTUnZ2EMTBytH5cGQ+8zW9tm00AB8eimCb4360l/8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC+jCCAeKgAwIBAgIUEyTKRqhwRp7VqaWPm50zo+CWrt0wDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UEAxMJbG9jYWxob3N0MCAXDTIyMDMyMTA5NTkwMFoYDzIxMjIw +MzIzMDk1OTAwWjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDrVbbVOJwhdFzi5hueWKwqHQ8OKtWup8lsOIQXbpc0 +T3/FPclF7Qo481SFBUH9Kg+kXJvxYS/sy/9VnoCq/UaUpaOZ6DQTM09bS9b1LOLM +EjXv9sMJ82ipQiwG/MOCGtuDHV++Hmf1lMej0pULL6WpBUhbIYWauiUWLlgLzc1u +v4JZcO/AuBl+tli49Af1ODGWQ4kJYESv27IDU0Jv+/HyE4fmm82vJEqAwjnjxmek +vsFpBvVK6dPUpTJ4hmG4pRrs4MzyxWBGi4PlWhka0LoT8pJ7gcykABToj3gt4Dmz +vVS1LoPq5ph5XgqE+8OHlIMaYIrG6fSxzFXQTyzp27pNAgMBAAGjQjBAMA4GA1Ud +DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR26y6V2jSvt5Dh +n2yaI0YW/nK2aTANBgkqhkiG9w0BAQsFAAOCAQEAxThI6kyDeKajNcnYP6urFO/d +7j/Yvhm1m/xsbg3Ou0iwJqygiJ+IC+jsVzA/tZE1TSX/Yn2KkGdc6vtZVTqESkSi +Gjxp36M1mhLKr/s4pspzSB+8pIOnhOBO2hcZ31DuWASv4AGpIT6XnuoK0KWaJvo8 +Dwbv1D89m5E2WickT4G/QLtbd05Ens/5BrrWW9Lt3f1IxffRWuTBdM7D7a/fF3zF +PpMWCAwmDeDwB9fbyBMtXo+Hd+R1YoeO5X5f0F4HO6VcVo+AkUNxs7FETYAiAQXn +yUYS/bCWHY6eeb67siCLtt4FprkYHt4SQHwKU1V4YmoEE7O/YN9IBEEvKVJZSg== +-----END CERTIFICATE----- diff --git a/pulsar-common/src/test/resources/ca/single-ca.pem b/pulsar-common/src/test/resources/ca/single-ca.pem new file mode 100644 index 
0000000000000..2cf2d06f97a67 --- /dev/null +++ b/pulsar-common/src/test/resources/ca/single-ca.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIC8jCCAdqgAwIBAgIUPnoDe05/dkrbpa2vpmnp45e6/4UwDQYJKoZIhvcNAQEL +BQAwEDEOMAwGA1UEAxMFdGVzdDEwIBcNMjIwMzIyMDkxOTAwWhgPMjEyMjAzMjQw +OTE5MDBaMBAxDjAMBgNVBAMTBXRlc3QxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAvbd4hZAwrgShqrA6g7QKyXQw/4TvskSBT411XCu9+ZIvG4tEafUJ +CjuHv6AEAt8XN9DSNzVB64q/cOczskaOMa/MQd6Qe+peAiqUsFyu6vucQyCWOLz6 +iuvjPyhuIL5ZYbh17CtXNZOn50BYzq95K4vcAvNUxq/HAGnAm2HegSujq4IMaVpU +gBE3OinUf6patbGqDDuPRUy/gw3I/+xkQcP9RxZqmbsvc6tw6ZpejBdCunCF9hxH +p1V70AqNlxUo7H2w7O7gSDU17gzq8kYoyyiJSLS4Wh1nDscpCQykcxYtS+Agb4VZ +GOYxWMyIBvhRHXLfPVSaYRKw5t5cVy4GjQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQULlE62PqE/wu4x4BmUmnE5Xfw +8rowDQYJKoZIhvcNAQELBQADggEBAEVd7o4Gdm9jpsIFuq8879q3XTmdVvLFL5/6 +g/AddloVeyznd6J1vP4n/4fcIskJ084SD1g8FXG21hOb4vQR06E1qWYhxgJJs7fz +kY3nInbmEWba4Sg7dHXL1KnKOCkhq25UlFF2sMI5BSyKwwAi1R7PKdbTFXZuwFL+ +bKJIvegh+jawlFi1LbSjYYRTy4GgSE8f8/T0xVjqNjdxBnExEkV/dklUJgxck1b1 +K7fRAeoB65tpO/jvYeoQGu1kJkUNmbJu8k1TbBJQm9AxR6OaE+ZKFCf3U5wqH3Ff +hvrO1utY5yvTUnZ2EMTBytH5cGQ+8zW9tm00AB8eimCb4360l/8= +-----END CERTIFICATE----- + diff --git a/pulsar-common/src/test/resources/ssl/jetty_client_key.jks b/pulsar-common/src/test/resources/ssl/jetty_client_key.jks new file mode 100644 index 0000000000000..2b8ea64347ddc Binary files /dev/null and b/pulsar-common/src/test/resources/ssl/jetty_client_key.jks differ diff --git a/pulsar-common/src/test/resources/ssl/jetty_client_trust.jks b/pulsar-common/src/test/resources/ssl/jetty_client_trust.jks new file mode 100644 index 0000000000000..166a2e00fb371 Binary files /dev/null and b/pulsar-common/src/test/resources/ssl/jetty_client_trust.jks differ diff --git a/pulsar-common/src/test/resources/ssl/jetty_server_key.jks b/pulsar-common/src/test/resources/ssl/jetty_server_key.jks new file mode 100644 index 0000000000000..b6189b75c8ad0 Binary files /dev/null and 
b/pulsar-common/src/test/resources/ssl/jetty_server_key.jks differ diff --git a/pulsar-common/src/test/resources/ssl/jetty_server_trust.jks b/pulsar-common/src/test/resources/ssl/jetty_server_trust.jks new file mode 100644 index 0000000000000..b09cc030a71c3 Binary files /dev/null and b/pulsar-common/src/test/resources/ssl/jetty_server_trust.jks differ diff --git a/pulsar-common/src/test/resources/ssl/my-ca/ca.pem b/pulsar-common/src/test/resources/ssl/my-ca/ca.pem new file mode 100644 index 0000000000000..3d5a80e234784 --- /dev/null +++ b/pulsar-common/src/test/resources/ssl/my-ca/ca.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC9DCCAdygAwIBAgIUNbNkV2+K2Hf4Q1V5gdAENZQiLokwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAxMGUHVsc2FyMCAXDTIyMDExNDA0MjgwMFoYDzIxMjIwMTE2 +MDQyODAwWjARMQ8wDQYDVQQDEwZQdWxzYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDBR2K5EKVziLqdsz78efEW4lOwKiJ32e97uxn1Z6oKgkgImpVP +Z9aoJB4EwSnDg+6FV2YULdWPm7C6W33tDmWRaU/Hlo/cOejnK8UmiMu/EyDpE2Wj +n0RimGmwOkBi2IWIcIzWMmPDZ9kZc65OUeEmwZedKRy62PQyfCeNU4OOHQn3PXjI +NbXJZD5TvBmn4SJn2RP9EgmIPaBAh/Mng045ZeHHLhwMKC8EOyHc2aB7AL6brymR +xzsiYWdcJn4mqqMvT82mVvhkgAMOcR4CXYF8eYnsG6ZbDHb13CawcvLVREJZk7AB +XZi9Rd5xczxHILM8rdkIZfunaG1X5hbih5wJAgMBAAGjQjBAMA4GA1UdDwEB/wQE +AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTCC1lYG+62cUPjNk9q4jCm +Ps65njANBgkqhkiG9w0BAQsFAAOCAQEAKV2Lpu5cH5EsG53EWsYxEKvuQZ0LTxCE +wCDf/NxJaQbzfv0tsbZatMge0vcZ/5r8tZZoOC+pGTwk6MaRbEFH8PmvlH1LIQvu +Y34/YQZOy8wBTWwaIfFMnYWc0iAFoFt2Lzuq+GOI+svTFp729Ae8r7UxY/f9Lioc +ttdGr7vA6PpcIMoEIPjVp+m41uL9IDfX8eOxg4gVlwtqpbHdTzMrOz0YY+3qH/WK +6Qffw4pwitzAEj2zCn2lvGC5cbpd13SAaqtB3xL/Aet0SS2r3g9qDo1RruQhXUng +06U/Hqtn5K1fNQv3pivi3Jg5z1DfJWHkH37luAoIlOZHRmPK6rhp/g== +-----END CERTIFICATE----- diff --git a/pulsar-common/src/test/resources/ssl/my-ca/client-ca.pem b/pulsar-common/src/test/resources/ssl/my-ca/client-ca.pem new file mode 100644 index 0000000000000..adcae3393ade1 --- /dev/null +++ b/pulsar-common/src/test/resources/ssl/my-ca/client-ca.pem @@ -0,0 +1,19 @@ 
+-----BEGIN CERTIFICATE----- +MIIDHDCCAgSgAwIBAgIUJJpmKX3DnbUwJ7tUhCt8MTiwz0owDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAxMGUHVsc2FyMCAXDTIyMDExNDA0MjgwMFoYDzIxMjExMjIx +MDQyODAwWjARMQ8wDQYDVQQDEwZQdWxzYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDZN+CNZ1i1WaXulbwSASOfXErWXhGV9DHqavPp3DohgQdundfS +648T/X80uWQlyxu4L4j0oc97jtzc1AyZFXj5nocVsveEO9aDjnYCc5NdBNJLQHgl +IO59fEpTd55NO24g9a8/sxgn0ADCenMlngk1Ou+2QJBONw7W12/WUSUg6ICe+b+x +qPzgApue16oGw9HxhPwa3oEvVZrEnFIWLjsSWtezhgFHMCH9/ngk0KlRyes/EZCz +ZgkO5mgii2fmNDg+yuWUfw7Q0x6BJskGIrxisJiJBRR1+DIvJqgqxJsNmeeEQrZK +YHBukj5RWDFOpOHgqFbPsv45sVKoLrGFrMnNAgMBAAGjajBoMA4GA1UdDwEB/wQE +AwIFoDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQW +BBSwkx93xjYP4I+dcFF3xS9NLesmFjAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJ +KoZIhvcNAQELBQADggEBAAK3ZF63w46pT76QIOeSM3ocUm6izvW/IrxLUESfgRC4 +gg0/5VfPiHHUe6orn15KuPXHe7xCUFqc2oFn5aIU1B/6iOPeNItvMJidU0a3UAiw +hFK9MSFgESNBiEnu1dE5tPcIIxTyCFQ/8loeY3dsdcNVoguH/2J9v/XcMMga46A1 +wudaaa1nb+ZYnXkRuyObKVJQN7EqC+4edinMOTPBbF9wtRMAMBRHXXENXb9zFthi +Dbdn4YvadYsNHxh5ar+hQn/HSPMuCUPY/uUqxtBagb6aS0YnSoUscSLs1Jizg5NX +d+QV8X/5E6W4xWnptUZwVxOemkdnr6A8MH1eQKKFZTM= +-----END CERTIFICATE----- diff --git a/pulsar-common/src/test/resources/ssl/my-ca/client-key.pem b/pulsar-common/src/test/resources/ssl/my-ca/client-key.pem new file mode 100644 index 0000000000000..5b08b151c8094 --- /dev/null +++ b/pulsar-common/src/test/resources/ssl/my-ca/client-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDZN+CNZ1i1WaXu +lbwSASOfXErWXhGV9DHqavPp3DohgQdundfS648T/X80uWQlyxu4L4j0oc97jtzc +1AyZFXj5nocVsveEO9aDjnYCc5NdBNJLQHglIO59fEpTd55NO24g9a8/sxgn0ADC +enMlngk1Ou+2QJBONw7W12/WUSUg6ICe+b+xqPzgApue16oGw9HxhPwa3oEvVZrE +nFIWLjsSWtezhgFHMCH9/ngk0KlRyes/EZCzZgkO5mgii2fmNDg+yuWUfw7Q0x6B +JskGIrxisJiJBRR1+DIvJqgqxJsNmeeEQrZKYHBukj5RWDFOpOHgqFbPsv45sVKo +LrGFrMnNAgMBAAECggEATeVZ45uiFja16J9NuG8sJSPluoY1bD8L/3KnUcAmIImy +7powIXVT8+k+StwI6/ywThbN2FyGmVqcHZz1f5hRr8KH0uJBHOyQetEFxM9Jk1v9 
+Rfsymq36mImP5erJnAyp66vvUrqY+P4Ap71duam4x5wBBqyUk1fvPGA5vPOQiwHs +TN9JHizGobY25fpigWKIMamyE7HWXEUzVdOo83ZiNx53ths+WcF/kqto2v5LtyfJ +HgoPocfZI8tRz9tfgc8zOkvyjsvgdd6rLhd0r2oExnyQBJdktGFpQZMGambU328u +NqcdJscjP/HWAHRzuSdOvCMOEn8E5GIjcWEnQqOmSQKBgQDcpb655/UdcVxrv2Ou +8juucDJMpf6i/UcmlXVXx+3zGSuQZcCC2fupe3JcxPdK7bo65YlC3OoRihggh2sS +cnFMNHMfyoE3G/doXIr3QyL9UAQt4yb+7Nz7jRXYcg4Ytv+FVS6BSzIDEK17v+es +GuWDM3JwtigtzYS4tRh7lgmuBwKBgQD8BXp7yIyVv657B8OJJSoeGataziFPhZux +WKoS3gq24169ZWXwLc+nwrdgvBNrRaHuX+cYh93RF9+2WZrRcRL41XqN938adasY +zPsfOJa9IOgUzQtGUMSe1/WqvHfcvqZCqYq4u/LSdf+I67woP4tCqqn4E928aIZb +6PjLH+dUiwKBgH1ntn7y1t1lEKIspPtJsaHzIqNttMvuKAJF7+t0Nkl0hM4NBt1Y +BzDMeLNBP0vW0YGn89uMs3xEgHH8hV52rO4i4UuwTMCFpJgsAM+H2NsgHz/1WrSI +6xANn9zk9h4V5CRjxYq2sjYLxI4RBBtNLiTjmKd24F8n78cLJl8XZ2kBAoGAGoHF +ATH1v2ZaxqvpYApdpK7UfAeEL2YBGyUVNkjOXbAKbec1Uo6u8ZkkSnNdo4G+Z2EE +4Gqh5PUa3YYNJ4w6D5v8eOQYJUNNDJ26p+z+xcOpRU7PqcSi+YYDW8LY5InU2NwW +MBnsj0BD8TXCI4WTcx6aI/KK9t8TiqU1Tb/8R8MCgYANVinOLz2enB+Qzu4o88W/ +witKHI3D9+z/uWjp0Q4rwmr3OL4FD9vZWvL4qwbDgpfLirJ4e3UVfN1/FoytAKlk +Kykf8oDWciCIdxStt/yUpgQv78IL3vM5d9B8Qb7KCRtJ0BIXGJ7Gle3xJeuduZLe ++F+hwI3Dpv5HPqa9o6ttJw== +-----END PRIVATE KEY----- diff --git a/pulsar-common/src/test/resources/ssl/my-ca/server-ca.pem b/pulsar-common/src/test/resources/ssl/my-ca/server-ca.pem new file mode 100644 index 0000000000000..df5f69298e258 --- /dev/null +++ b/pulsar-common/src/test/resources/ssl/my-ca/server-ca.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDHDCCAgSgAwIBAgIUVQHD0/oi9Ca50HA7DFLYOO2wEzYwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAxMGUHVsc2FyMCAXDTIyMDExNDA0MjgwMFoYDzIxMjExMjIx +MDQyODAwWjARMQ8wDQYDVQQDEwZQdWxzYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDBcqDkMhjLd9ik//UQijqbajQP5t6dvVZNn9gODQrS9oB/URur +NzCcPWYPJZfEJlTkV8mlmgq4dBjwghpy5ALOGiERk55JPIN4cy01hQ6j7YSPFvMv +BjqZvm5dpGDNTr7GY7THegMM1wpk9EaUOm7tBOHtf6ZnANjSMcQM74RCSBt0Koqw +06CKVDCbgJ5NNE1LgwYeVQAwtQAhY8rqqQKJvCorFbq7OiisFBnz5pRBT6N4kMo1 
+9LZo3Oe2F2w9eH9vacQ0NjSOCNXqal9Xl/Pwy9JgKKppwZ/3nCgRc+yfjrnkRz0f +b+llb2NpR5Ge+tNMakqelE8bDSw/5BPjRPftAgMBAAGjajBoMA4GA1UdDwEB/wQE +AwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMB0GA1UdDgQW +BBRXws5mmLbW+xOLflUyUZ0I0uN96zAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJ +KoZIhvcNAQELBQADggEBAKMklpYJIkp4icz9Ea5wWQiRXWb94lGdyCA833VHeGB2 +fKvNXj1d6lEiy26pOjhDmycroKelj70WqOsqVgi4xh4Y9sj6pwb8Q423Tu3qNO1k +qaScTar2DANSigNzqlSbLshPWQ2ZyDwkvZPuqPgHzOXekzbUGwxgCiySaQkl2mCS +mBaG3XnESwiMIKkLphEv0MAvTVaImbSRWYEQ4OECwcHXxx+14wK8NLcdDIHcSzki +8Eq24CxDOeL5QxciGMi5tylsdCpT+D/BXTKiu46yoRjXUsTLYL53yUZZIqQ3A4CV +enZ/vHhP0Ev9RcRigFTqrBm7EC3b2AUpvqgRMnPwQZo= +-----END CERTIFICATE----- diff --git a/pulsar-common/src/test/resources/ssl/my-ca/server-key.pem b/pulsar-common/src/test/resources/ssl/my-ca/server-key.pem new file mode 100644 index 0000000000000..a3f3a36b73c37 --- /dev/null +++ b/pulsar-common/src/test/resources/ssl/my-ca/server-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDBcqDkMhjLd9ik +//UQijqbajQP5t6dvVZNn9gODQrS9oB/URurNzCcPWYPJZfEJlTkV8mlmgq4dBjw +ghpy5ALOGiERk55JPIN4cy01hQ6j7YSPFvMvBjqZvm5dpGDNTr7GY7THegMM1wpk +9EaUOm7tBOHtf6ZnANjSMcQM74RCSBt0Koqw06CKVDCbgJ5NNE1LgwYeVQAwtQAh +Y8rqqQKJvCorFbq7OiisFBnz5pRBT6N4kMo19LZo3Oe2F2w9eH9vacQ0NjSOCNXq +al9Xl/Pwy9JgKKppwZ/3nCgRc+yfjrnkRz0fb+llb2NpR5Ge+tNMakqelE8bDSw/ +5BPjRPftAgMBAAECggEBAJm2JsgMUo1ihn/dbnIdFCKoCgRUs7FtYCVADOJlVKN7 +AXGpFi4/JV4Qn4cLnQNcXfovE2iF9VzJy4NYLgH60YvJUVtxC8Yv0lukUVkEiDST +p9A3MTa9YVUG7xVzZwPcPVTQpzYV6lSKjpTXUTm5EKk/RvJ7itKv5plmt9x7eYFb +/JwqXo1Z6C4gfIFR85LWmrCsNUK5T9oooLz88D6+ZH3+fWlr75RDff2kqdLshMTs +N0Ov7NXcRFeruFs/IPrgTxjBMeNa2LFdYVPeeQ41L4uOI49uVBAmSn1be+THvDoj +Do+6wTEF/h6/VLoOaIFZZdHlqd4is+xcEg8gwVkCn2ECgYEAxqVvGKc9qaqEVwBx +U5Ru9OFx0NqEBvkYZRbCg1REcMFd3lqFTHvHiF3pmCp0XgLJKYuy42618IJXhj6D +Y15/p9jX0025MpnH/AdwpO6x5pv6gb/JOMnHOnq8sI3R+V6TVsv1WZj0sOj94mF0 ++Od++bQkUnSlfE4X7v+cJfo/Q8UCgYEA+Uz1yOyI9Dv1dEdBMdBA8MTriYU0uJCV 
+dVKzL/uC9XyguVBWu1HX0MvEKyjPRycvLB7TuQqAFLgCtC8EEuPGBpWtyXOm9Jxw +ToCfUZFuBQeMuf4vZcFgJjiEKTdKBxrvjkhyIhPR6JAy0WUr8Ry+ZtqvmG5NOEz5 +ptm1tznYngkCgYEAlckeyV8p/uqF2biKu3QcamgoU0zB6yQfAfK0fySmasNTzZtC +EhbvsOLnhgbVMiI1ny8ol5fedtlBuAchOWeDKIQ40as0r3QHuQG/LY6S9Im+zeFY +kIqNwInWB+cYYkmvHe6zNXlBYLh+4BmOgzTDqPPtw4MTWXTlVSDGlFhrJeUCgYBX +7rlS4Xt9ChkNpoRsWZROWGbr3rw1zWmqND1X01Lh28+lDZ1J/RguYXET+BUEd+G/ +oi/zuKxsomrxuxOoxgZ3FBx0TgK5jORgDCYl0zIHPB57DBkTvx123cBf+Ux3LR0K +BqubMXp8mUATc6gIJ6dRCBmfnmhGT4BPRcM+mXy6YQKBgGEGH37VABus+Oi3g1bk +qEAaUI1asRLJIfbY2ImxEroLIQAbTFuIQUsZTKpT7jJZubjYvy1Fev0LU/n7Kv2w +7ym41z70ro5uxwUBfJjnF3RtgncNcftn4b3siNzvBfKEBuhegMeS5YAbBIwABUpR +4mVpm9BLOiX4yENIT6JdUQFc +-----END PRIVATE KEY----- diff --git a/pulsar-config-validation/pom.xml b/pulsar-config-validation/pom.xml index 134e3f147ee06..57c5e04858ea9 100644 --- a/pulsar-config-validation/pom.xml +++ b/pulsar-config-validation/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/pulsar-functions/api-java/pom.xml b/pulsar-functions/api-java/pom.xml index 68901a3da9cd2..d95667f81ce37 100644 --- a/pulsar-functions/api-java/pom.xml +++ b/pulsar-functions/api-java/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-functions - 2.9.0-SNAPSHOT + 2.9.3 pulsar-functions-api diff --git a/pulsar-functions/instance/pom.xml b/pulsar-functions/instance/pom.xml index c6f5524e76fb9..705e04e15d03f 100644 --- a/pulsar-functions/instance/pom.xml +++ b/pulsar-functions/instance/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar-functions - 2.9.0-SNAPSHOT + 2.9.3 pulsar-functions-instance @@ -53,6 +53,12 @@ ${project.version} + + ${project.groupId} + pulsar-metadata + ${project.version} + + ${project.groupId} pulsar-io-core @@ -188,6 +194,12 @@ ${prometheus-jmx.version} + + org.awaitility + awaitility + test + + diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/JavaInstanceRunnable.java 
b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/JavaInstanceRunnable.java index cfdcb08071846..ce8c9fc9296fe 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/JavaInstanceRunnable.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/JavaInstanceRunnable.java @@ -53,6 +53,7 @@ import org.apache.pulsar.common.functions.ProducerConfig; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.common.util.Reflections; +import org.apache.pulsar.common.nar.FileUtils; import org.apache.pulsar.functions.api.Function; import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.functions.api.StateStore; @@ -103,6 +104,7 @@ public class JavaInstanceRunnable implements AutoCloseable, Runnable { private LogAppender logAppender; // provide tables for storing states + private final String stateStorageImplClass; private final String stateStorageServiceUrl; private StateStoreProvider stateStoreProvider; private StateManager stateManager; @@ -144,6 +146,7 @@ public JavaInstanceRunnable(InstanceConfig instanceConfig, ClientBuilder clientBuilder, PulsarClient pulsarClient, PulsarAdmin pulsarAdmin, + String stateStorageImplClass, String stateStorageServiceUrl, SecretsProvider secretsProvider, FunctionCollectorRegistry collectorRegistry, @@ -152,6 +155,7 @@ public JavaInstanceRunnable(InstanceConfig instanceConfig, this.clientBuilder = clientBuilder; this.client = (PulsarClientImpl) pulsarClient; this.pulsarAdmin = pulsarAdmin; + this.stateStorageImplClass = stateStorageImplClass; this.stateStorageServiceUrl = stateStorageServiceUrl; this.secretsProvider = secretsProvider; this.functionClassLoader = functionClassLoader; @@ -322,8 +326,8 @@ private void setupStateStore() throws Exception { if (null == stateStorageServiceUrl) { stateStoreProvider = StateStoreProvider.NULL; } else { - stateStoreProvider = new BKStateStoreProviderImpl(); - Map 
stateStoreProviderConfig = new HashMap(); + stateStoreProvider = getStateStoreProvider(); + Map stateStoreProviderConfig = new HashMap<>(); stateStoreProviderConfig.put(BKStateStoreProviderImpl.STATE_STORAGE_SERVICE_URL, stateStorageServiceUrl); stateStoreProvider.init(stateStoreProviderConfig, instanceConfig.getFunctionDetails()); @@ -339,6 +343,14 @@ private void setupStateStore() throws Exception { } } + private StateStoreProvider getStateStoreProvider() throws Exception { + if (stateStorageImplClass == null) { + return new BKStateStoreProviderImpl(); + } else { + return (StateStoreProvider) Class.forName(stateStorageImplClass).getConstructor().newInstance(); + } + } + private void handleResult(Record srcRecord, JavaExecutionResult result) throws Exception { if (result.getUserException() != null) { Exception t = result.getUserException(); diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/LogAppender.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/LogAppender.java index 20354d4854a27..1b75eec7522dd 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/LogAppender.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/LogAppender.java @@ -23,6 +23,7 @@ import org.apache.logging.log4j.core.ErrorHandler; import org.apache.logging.log4j.core.Layout; import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.appender.DefaultErrorHandler; import org.apache.pulsar.client.api.CompressionType; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.PulsarClient; @@ -35,6 +36,11 @@ * to a log topic. 
*/ public class LogAppender implements Appender { + + private static final String LOG_LEVEL = "loglevel"; + private static final String INSTANCE = "instance"; + private static final String FQN = "fqn"; + private PulsarClient pulsarClient; private String logTopic; private String fqn; @@ -48,15 +54,16 @@ public LogAppender(PulsarClient pulsarClient, String logTopic, String fqn, Strin this.logTopic = logTopic; this.fqn = fqn; this.instance = instance; + this.errorHandler = new DefaultErrorHandler(this); } @Override public void append(LogEvent logEvent) { producer.newMessage() .value(logEvent.getMessage().getFormattedMessage().getBytes(StandardCharsets.UTF_8)) - .property("loglevel", logEvent.getLevel().name()) - .property("instance", instance) - .property("fqn", fqn) + .property(LOG_LEVEL, logEvent.getLevel().name()) + .property(INSTANCE, instance) + .property(FQN, fqn) .sendAsync(); } @@ -82,6 +89,12 @@ public ErrorHandler getHandler() { @Override public void setHandler(ErrorHandler errorHandler) { + if (errorHandler == null) { + throw new RuntimeException("The log error handler cannot be set to null"); + } + if (isStarted()) { + throw new RuntimeException("The log error handler cannot be changed once the appender is started"); + } this.errorHandler = errorHandler; } diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/BKStateStoreProviderImpl.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/BKStateStoreProviderImpl.java index fd4228a1adfa9..32901bd478aed 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/BKStateStoreProviderImpl.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/BKStateStoreProviderImpl.java @@ -56,8 +56,6 @@ @Slf4j public class BKStateStoreProviderImpl implements StateStoreProvider { - public static final String STATE_STORAGE_SERVICE_URL = "stateStorageServiceUrl"; - private 
String stateStorageServiceUrl; private Map clients; diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/PulsarMetadataStateStoreImpl.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/PulsarMetadataStateStoreImpl.java new file mode 100644 index 0000000000000..1c1df7f6a6f20 --- /dev/null +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/PulsarMetadataStateStoreImpl.java @@ -0,0 +1,142 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.functions.instance.state; + +import java.nio.ByteBuffer; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.functions.api.StateStoreContext; +import org.apache.pulsar.metadata.api.MetadataCache; +import org.apache.pulsar.metadata.api.MetadataStore; +import org.apache.pulsar.metadata.api.MetadataStoreException; + +public class PulsarMetadataStateStoreImpl implements DefaultStateStore { + + private final MetadataStore store; + private final String prefixPath; + private final MetadataCache countersCache; + + private final String namespace; + private final String tenant; + private final String name; + private final String fqsn; + + PulsarMetadataStateStoreImpl(MetadataStore store, String prefix, String tenant, String namespace, String name) { + this.store = store; + this.tenant = tenant; + this.namespace = namespace; + this.name = name; + this.fqsn = tenant + '/' + namespace + '/' + name; + + this.prefixPath = prefix + '/' + fqsn + '/'; + this.countersCache = store.getMetadataCache(Long.class); + } + + @Override + public String tenant() { + return tenant; + } + + @Override + public String namespace() { + return namespace; + } + + @Override + public String name() { + return name; + } + + @Override + public String fqsn() { + return fqsn; + } + + @Override + public void init(StateStoreContext ctx) { + } + + @Override + public void close() { + } + + @Override + public void put(String key, ByteBuffer value) { + putAsync(key, value).join(); + } + + @Override + public CompletableFuture putAsync(String key, ByteBuffer value) { + byte[] bytes = new byte[value.remaining()]; + value.get(bytes); + return store.put(getPath(key), bytes, Optional.empty()) + .thenApply(__ -> null); + } + + @Override + public void delete(String key) { + deleteAsync(key).join(); + } + + @Override + public CompletableFuture deleteAsync(String key) { + return 
store.delete(getPath(key), Optional.empty()); + } + + @Override + public ByteBuffer get(String key) { + return getAsync(key).join(); + } + + @Override + public CompletableFuture getAsync(String key) { + return store.get(getPath(key)) + .thenApply(optRes -> + optRes.map(x -> ByteBuffer.wrap(x.getValue())) + .orElse(null)); + } + + @Override + public void incrCounter(String key, long amount) { + incrCounterAsync(key, amount); + } + + @Override + public CompletableFuture incrCounterAsync(String key, long amount) { + return countersCache.readModifyUpdateOrCreate(getPath(key), optValue -> + optValue.orElse(0L) + amount + ).thenApply(__ -> null); + } + + @Override + public long getCounter(String key) { + return getCounterAsync(key).join(); + } + + @Override + public CompletableFuture getCounterAsync(String key) { + return countersCache.get(getPath(key)) + .thenApply(optValue -> optValue.orElse(0L)); + } + + private String getPath(String key) { + return prefixPath + key; + } +} diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/PulsarMetadataStateStoreProviderImpl.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/PulsarMetadataStateStoreProviderImpl.java new file mode 100644 index 0000000000000..819bfd94cb7a5 --- /dev/null +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/PulsarMetadataStateStoreProviderImpl.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.functions.instance.state; + +import java.util.Map; +import lombok.SneakyThrows; +import org.apache.pulsar.functions.proto.Function; +import org.apache.pulsar.metadata.api.MetadataStore; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.MetadataStoreFactory; + +public class PulsarMetadataStateStoreProviderImpl implements StateStoreProvider { + + private static final String METADATA_URL = "METADATA_URL"; + private static final String METADATA_STORE_INSTANCE = "METADATA_STORE_INSTANCE"; + + private static final String METADATA_PREFIX = "METADATA_PREFIX"; + private static final String METADATA_DEFAULT_PREFIX = "/state-store"; + + private MetadataStore store; + private String prefix; + private boolean shouldCloseStore; + + @Override + public void init(Map config, Function.FunctionDetails functionDetails) throws Exception { + + prefix = (String) config.getOrDefault(METADATA_PREFIX, METADATA_DEFAULT_PREFIX); + + if (config.containsKey(METADATA_STORE_INSTANCE)) { + store = (MetadataStore) config.get(METADATA_STORE_INSTANCE); + shouldCloseStore = false; + } else { + String metadataUrl = (String) config.get(METADATA_URL); + store = MetadataStoreFactory.create(metadataUrl, MetadataStoreConfig.builder().build()); + shouldCloseStore = true; + } + } + + @Override + public DefaultStateStore getStateStore(String tenant, String namespace, String name) throws Exception { + return new PulsarMetadataStateStoreImpl(store, prefix, tenant, namespace, name); + } + + @SneakyThrows + @Override 
+ public void close() { + if (shouldCloseStore) { + store.close(); + } + } +} diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/StateStoreProvider.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/StateStoreProvider.java index db3c6b3aedda9..4c01e9db3d438 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/StateStoreProvider.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/state/StateStoreProvider.java @@ -27,6 +27,8 @@ */ public interface StateStoreProvider extends AutoCloseable { + String STATE_STORAGE_SERVICE_URL = "stateStorageServiceUrl"; + /** * The state store provider returns `null` state stores. */ diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/stats/PrometheusTextFormat.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/stats/PrometheusTextFormat.java index f7a205c7db02c..46d232da3e73d 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/stats/PrometheusTextFormat.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/instance/stats/PrometheusTextFormat.java @@ -37,6 +37,11 @@ public static void write004(Writer writer, Enumeration 0) { @@ -64,19 +69,19 @@ private static void writeEscapedLabelValue(Writer writer, String s) throws IOExc for (int i = 0; i < s.length(); i++) { char c = s.charAt(i); switch (c) { - case '\\': - writer.append("\\\\"); - break; - case '\"': - writer.append("\\\""); - break; - case '\n': - writer.append("\\n"); - break; - default: - writer.append(c); + case '\\': + writer.append("\\\\"); + break; + case '\"': + writer.append("\\\""); + break; + case '\n': + writer.append("\\n"); + break; + default: + writer.append(c); } } } -} +} \ No newline at end of file diff --git 
a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/WaterMarkEventGenerator.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/WaterMarkEventGenerator.java index d9042022b293c..0024586941d48 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/WaterMarkEventGenerator.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/WaterMarkEventGenerator.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.functions.windowing; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.util.concurrent.ThreadFactoryBuilder; import lombok.extern.slf4j.Slf4j; import org.apache.logging.log4j.ThreadContext; @@ -134,7 +135,9 @@ private void checkFailures() { } public void start() { - this.executorFuture = executorService.scheduleAtFixedRate(this, intervalMs, intervalMs, TimeUnit.MILLISECONDS); + this.executorFuture = + executorService.scheduleAtFixedRate(catchingAndLoggingThrowables(this), intervalMs, intervalMs, + TimeUnit.MILLISECONDS); } public void shutdown() { diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/WindowManager.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/WindowManager.java index 06e0b88776959..9f7b5bb0a2d5b 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/WindowManager.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/WindowManager.java @@ -105,7 +105,9 @@ public void add(T event, long ts, Record record) { public void add(Event windowEvent) { // watermark events are not added to the queue. 
if (windowEvent.isWatermark()) { - log.debug(String.format("Got watermark event with ts %d", windowEvent.getTimestamp())); + if (log.isDebugEnabled()) { + log.debug("Got watermark event with ts {}", windowEvent.getTimestamp()); + } } else { queue.add(windowEvent); } @@ -145,8 +147,9 @@ public boolean onTrigger() { prevWindowEvents.clear(); if (!events.isEmpty()) { prevWindowEvents.addAll(windowEvents); - log.debug(String.format("invoking windowLifecycleListener onActivation, [%d] events in " - + "window.", events.size())); + if (log.isDebugEnabled()) { + log.debug("invoking windowLifecycleListener onActivation, [{}] events in window.", events.size()); + } windowLifecycleListener.onActivation(events, newEvents, expired, evictionPolicy.getContext().getReferenceTime()); } else { @@ -216,7 +219,9 @@ private List> scanEvents(boolean fullScan) { lock.unlock(); } eventsSinceLastExpiry.set(0); - log.debug(String.format("[%d] events expired from window.", eventsToExpire.size())); + if (log.isDebugEnabled()) { + log.debug("[{}] events expired from window.", eventsToExpire.size()); + } if (!eventsToExpire.isEmpty()) { log.debug("invoking windowLifecycleListener.onExpiry"); windowLifecycleListener.onExpiry(eventsToExpire); diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/triggers/TimeTriggerPolicy.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/triggers/TimeTriggerPolicy.java index bc40f5329e460..3ae0c23901193 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/triggers/TimeTriggerPolicy.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/triggers/TimeTriggerPolicy.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.functions.windowing.triggers; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.util.concurrent.ThreadFactoryBuilder; import lombok.extern.slf4j.Slf4j; 
import org.apache.logging.log4j.ThreadContext; @@ -75,7 +76,9 @@ public void reset() { @Override public void start() { - executorFuture = executor.scheduleAtFixedRate(newTriggerTask(), duration, duration, TimeUnit.MILLISECONDS); + executorFuture = + executor.scheduleAtFixedRate(catchingAndLoggingThrowables(newTriggerTask()), duration, duration, + TimeUnit.MILLISECONDS); } @Override diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/triggers/WatermarkCountTriggerPolicy.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/triggers/WatermarkCountTriggerPolicy.java index 26a9ce737996e..7b60e63e45224 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/triggers/WatermarkCountTriggerPolicy.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/triggers/WatermarkCountTriggerPolicy.java @@ -83,7 +83,7 @@ private void handleWaterMarkEvent(Event waterMarkEvent) { List eventTs = windowManager.getSlidingCountTimestamps(lastProcessedTs, watermarkTs, count); for (long ts : eventTs) { - evictionPolicy.setContext(new DefaultEvictionContext(ts, null, Long.valueOf(count))); + evictionPolicy.setContext(new DefaultEvictionContext(ts, null, (long) count)); handler.onTrigger(); lastProcessedTs = ts; } diff --git a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/triggers/WatermarkTimeTriggerPolicy.java b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/triggers/WatermarkTimeTriggerPolicy.java index 22722bf887a2e..a2bfca689b0fd 100644 --- a/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/triggers/WatermarkTimeTriggerPolicy.java +++ b/pulsar-functions/instance/src/main/java/org/apache/pulsar/functions/windowing/triggers/WatermarkTimeTriggerPolicy.java @@ -80,7 +80,9 @@ public void shutdown() { private void handleWaterMarkEvent(Event event) { long watermarkTs = 
event.getTimestamp(); long windowEndTs = nextWindowEndTs; - log.debug(String.format("Window end ts %d Watermark ts %d", windowEndTs, watermarkTs)); + if (log.isDebugEnabled()) { + log.debug("Window end ts {} Watermark ts {}", windowEndTs, watermarkTs); + } while (windowEndTs <= watermarkTs) { long currentCount = windowManager.getEventCount(windowEndTs); evictionPolicy.setContext(new DefaultEvictionContext(windowEndTs, currentCount)); @@ -93,10 +95,14 @@ private void handleWaterMarkEvent(Event event) { * window intervals based on event ts. */ long ts = getNextAlignedWindowTs(windowEndTs, watermarkTs); - log.debug(String.format("Next aligned window end ts %d", ts)); + if (log.isDebugEnabled()) { + log.debug("Next aligned window end ts {}", ts); + } if (ts == Long.MAX_VALUE) { - log.debug(String.format("No events to process between %d and watermark ts %d", - windowEndTs, watermarkTs)); + if (log.isDebugEnabled()) { + log.debug("No events to process between {} and watermark ts {}", + windowEndTs, watermarkTs); + } break; } windowEndTs = ts; diff --git a/pulsar-functions/instance/src/main/python/python_instance_main.py b/pulsar-functions/instance/src/main/python/python_instance_main.py index 627013489ab70..0672142e33451 100644 --- a/pulsar-functions/instance/src/main/python/python_instance_main.py +++ b/pulsar-functions/instance/src/main/python/python_instance_main.py @@ -99,7 +99,7 @@ def main(): if os.path.splitext(str(args.py))[1] == '.whl': if args.install_usercode_dependencies: - cmd = "pip install -t %s" % os.path.dirname(str(args.py)) + cmd = "pip install -t %s" % os.path.dirname(os.path.abspath(str(args.py))) if args.dependency_repository: cmd = cmd + " -i %s" % str(args.dependency_repository) if args.extra_dependency_repository: @@ -112,7 +112,7 @@ def main(): else: zpfile = zipfile.ZipFile(str(args.py), 'r') zpfile.extractall(os.path.dirname(str(args.py))) - sys.path.insert(0, os.path.dirname(str(args.py))) + sys.path.insert(0, 
os.path.dirname(os.path.abspath(str(args.py)))) elif os.path.splitext(str(args.py))[1] == '.zip': # Assumig zip file with format func.zip # extract to folder function @@ -123,21 +123,21 @@ def main(): # run pip install to target folder deps folder zpfile = zipfile.ZipFile(str(args.py), 'r') zpfile.extractall(os.path.dirname(str(args.py))) - basename = os.path.splitext(str(args.py))[0] + basename = os.path.basename(os.path.splitext(str(args.py))[0]) deps_dir = os.path.join(os.path.dirname(str(args.py)), basename, "deps") if os.path.isdir(deps_dir) and os.listdir(deps_dir): # get all wheel files from deps directory wheel_file_list = [os.path.join(deps_dir, f) for f in os.listdir(deps_dir) if os.path.isfile(os.path.join(deps_dir, f)) and os.path.splitext(f)[1] =='.whl'] - cmd = "pip install -t %s --no-index --find-links %s %s" % (os.path.dirname(str(args.py)), deps_dir, " ".join(wheel_file_list)) + cmd = "pip install -t %s --no-index --find-links %s %s" % (os.path.dirname(os.path.abspath(str(args.py))), deps_dir, " ".join(wheel_file_list)) Log.debug("Install python dependencies via cmd: %s" % cmd) retval = os.system(cmd) if retval != 0: print("Could not install user depedencies specified by the zip file") sys.exit(1) # add python user src directory to path - sys.path.insert(0, os.path.join(os.path.dirname(str(args.py)), basename, "src")) + sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(str(args.py))), basename, "src")) log_file = os.path.join(args.logging_directory, util.getFullyQualifiedFunctionName(function_details.tenant, function_details.namespace, function_details.name), diff --git a/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceRunnableTest.java b/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceRunnableTest.java index 59a4f5b50a618..b96e8134bf9d7 100644 --- 
a/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceRunnableTest.java +++ b/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceRunnableTest.java @@ -71,7 +71,7 @@ private JavaInstanceRunnable createRunnable(String outputSerde) throws Exception ClientBuilder clientBuilder = mock(ClientBuilder.class); when(clientBuilder.build()).thenReturn(null); JavaInstanceRunnable javaInstanceRunnable = new JavaInstanceRunnable( - config, clientBuilder, null, null, null, null, null, null); + config, clientBuilder, null, null, null,null, null, null, null); return javaInstanceRunnable; } diff --git a/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceTest.java b/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceTest.java index 03c0da547bd97..9931ddf4ce839 100644 --- a/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceTest.java +++ b/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/instance/JavaInstanceTest.java @@ -34,6 +34,7 @@ import org.apache.pulsar.functions.api.Function; import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.functions.instance.JavaInstance.AsyncFuncRequest; +import org.testng.Assert; import org.testng.annotations.Test; @Slf4j @@ -226,7 +227,7 @@ public void testAsyncFunctionMaxPending() throws Exception { for (int i = 0; i < 3; i++) { AsyncFuncRequest request = instance.getPendingAsyncRequests().poll(); - assertNotNull(testString + "-lambda", (String) request.getProcessResult().get()); + Assert.assertEquals(request.getProcessResult().get(), testString + "-lambda"); } long endTime = System.currentTimeMillis(); @@ -235,7 +236,6 @@ public void testAsyncFunctionMaxPending() throws Exception { instance.close(); } - @SuppressWarnings("serial") private static class UserException extends Exception { public UserException(String msg) { 
super(msg); diff --git a/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/instance/state/PulsarMetadataStateStoreImplTest.java b/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/instance/state/PulsarMetadataStateStoreImplTest.java new file mode 100644 index 0000000000000..a6fb3eb365a06 --- /dev/null +++ b/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/instance/state/PulsarMetadataStateStoreImplTest.java @@ -0,0 +1,120 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.functions.instance.state; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.*; +import io.kubernetes.client.proto.Meta; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import java.nio.ByteBuffer; +import java.util.concurrent.CompletableFuture; +import lombok.SneakyThrows; +import org.apache.bookkeeper.api.kv.Table; +import org.apache.bookkeeper.api.kv.options.Options; +import org.apache.bookkeeper.api.kv.result.DeleteResult; +import org.apache.bookkeeper.common.concurrent.FutureUtils; +import org.apache.pulsar.metadata.api.MetadataCache; +import org.apache.pulsar.metadata.api.MetadataStore; +import org.apache.pulsar.metadata.api.MetadataStoreConfig; +import org.apache.pulsar.metadata.api.MetadataStoreFactory; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +/** + * Unit test {@link BKStateStoreImpl}. 
+ */ +public class PulsarMetadataStateStoreImplTest { + + private static final String TENANT = "test-tenant"; + private static final String NS = "test-ns"; + private static final String NAME = "test-name"; + private static final String FQSN = "test-tenant/test-ns/test-name"; + private static final String PREFIX = "/prefix"; + private static final String PREFIX_PATH = PREFIX + '/' + FQSN + '/'; + + private MetadataStore store; + private MetadataCache countersCache; + private DefaultStateStore stateContext; + + @BeforeMethod + public void setup() throws Exception { + this.store = MetadataStoreFactory.create("memory://local", MetadataStoreConfig.builder().build()); + this.countersCache = store.getMetadataCache(Long.class); + this.stateContext = new PulsarMetadataStateStoreImpl(store, "/prefix", TENANT, NS, NAME); + } + + @AfterMethod + public void cleanup() throws Exception { + this.store.close(); + } + + @Test + public void testGetter() { + assertEquals(stateContext.tenant(), TENANT); + assertEquals(stateContext.namespace(), NS); + assertEquals(stateContext.name(), NAME); + assertEquals(stateContext.fqsn(), FQSN); + } + + @Test + public void testIncr() throws Exception { + stateContext.incrCounter("test-key", 10L); + assertEquals(countersCache.get(PREFIX_PATH + "test-key").join().get().longValue(), 10); + } + + @Test + public void testPut() throws Exception { + stateContext.put("test-key", ByteBuffer.wrap("test-value".getBytes(UTF_8))); + assertEquals(store.get(PREFIX_PATH + "test-key").join().get().getValue(), "test-value".getBytes(UTF_8)); + } + + @Test + public void testDelete() throws Exception { + stateContext.put("test-key", ByteBuffer.wrap("test-value".getBytes(UTF_8))); + assertEquals("test-value".getBytes(UTF_8), store.get(PREFIX_PATH + "test-key").join().get().getValue()); + stateContext.delete("test-key"); + assertFalse(store.get(PREFIX_PATH + "test-key").join().isPresent()); + } + + @Test + public void testGetAmount() throws Exception { + 
assertEquals(stateContext.getCounter("test-key"), 0); + stateContext.incrCounter("test-key", 10L); + assertEquals(countersCache.get(PREFIX_PATH + "test-key").join().get().longValue(), 10); + assertEquals(stateContext.getCounter("test-key"), 10); + } + + @Test + public void testGetKeyNotPresent() throws Exception { + CompletableFuture result = stateContext.getAsync("test-key"); + assertTrue(result != null); + assertEquals(result.get(), null); + } + +} diff --git a/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/source/batch/BatchSourceExecutorTest.java b/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/source/batch/BatchSourceExecutorTest.java index ff15a10ebbe86..6715b74624b51 100644 --- a/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/source/batch/BatchSourceExecutorTest.java +++ b/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/source/batch/BatchSourceExecutorTest.java @@ -19,6 +19,8 @@ package org.apache.pulsar.functions.source.batch; +import static org.awaitility.Awaitility.await; +import static org.testng.Assert.fail; import com.google.gson.Gson; import lombok.Getter; import org.apache.pulsar.client.api.ConsumerBuilder; @@ -44,7 +46,6 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.LinkedBlockingQueue; import java.util.function.Consumer; -import static org.testng.Assert.fail; /** * Unit tests for {@link org.apache.pulsar.functions.source.batch.BatchSourceExecutor} @@ -368,6 +369,8 @@ public void testLifeCycle() throws Exception { } Assert.assertEquals(testBatchSource.getRecordCount(), 6); Assert.assertEquals(testBatchSource.getDiscoverCount(), 1); + + awaitDiscoverNotInProgress(); triggerQueue.put("trigger"); completedQueue.take(); Assert.assertTrue(testBatchSource.getDiscoverCount() == 2); @@ -387,6 +390,8 @@ public void testPushLifeCycle() throws Exception { } Assert.assertEquals(testBatchPushSource.getRecordCount(), 5); 
Assert.assertEquals(testBatchPushSource.getDiscoverCount(), 1); + + awaitDiscoverNotInProgress(); triggerQueue.put("trigger"); completedQueue.take(); Assert.assertEquals(testBatchPushSource.getDiscoverCount(), 2); @@ -406,4 +411,8 @@ public void testDiscoveryPhaseError() throws Exception { fail("should have thrown an exception"); } + private void awaitDiscoverNotInProgress() { + await().until(() -> !batchSourceExecutor.discoverInProgress); + } + } \ No newline at end of file diff --git a/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/windowing/WindowFunctionExecutorTest.java b/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/windowing/WindowFunctionExecutorTest.java index 9e0973370603f..f3502d667b381 100644 --- a/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/windowing/WindowFunctionExecutorTest.java +++ b/pulsar-functions/instance/src/test/java/org/apache/pulsar/functions/windowing/WindowFunctionExecutorTest.java @@ -89,7 +89,7 @@ public long extractTimestamp(Long input) { private static class TestWrongTimestampExtractor implements TimestampExtractor { @Override public long extractTimestamp(String input) { - return Long.valueOf(input); + return Long.parseLong(input); } } diff --git a/pulsar-functions/java-examples/pom.xml b/pulsar-functions/java-examples/pom.xml index 82735a03f0ee8..0d96e2c9bb9ea 100644 --- a/pulsar-functions/java-examples/pom.xml +++ b/pulsar-functions/java-examples/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-functions - 2.9.0-SNAPSHOT + 2.9.3 pulsar-functions-api-examples diff --git a/pulsar-functions/localrun-shaded/pom.xml b/pulsar-functions/localrun-shaded/pom.xml index ded9d60bed82b..9d081892cbf56 100644 --- a/pulsar-functions/localrun-shaded/pom.xml +++ b/pulsar-functions/localrun-shaded/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar-functions - 2.9.0-SNAPSHOT + 2.9.3 .. 
@@ -83,7 +83,7 @@ @@ -106,6 +106,40 @@ + + + org.apache.pulsar:* + org.apache.bookkeeper:* + commons-*:* + org.apache.commons:* + com.fasterxml.jackson.*:* + io.netty:* + com.google.*:* + javax.servlet:* + org.reactivestreams:reactive-streams + org.apache.commons:* + io.swagger:* + org.yaml:snakeyaml + io.perfmark:* + io.prometheus:* + io.prometheus.jmx:* + javax.ws.rs:* + org.tukaani:xz + com.github.zafarkhaja:java-semver + net.java.dev.jna:* + org.apache.zookeeper:* + com.thoughtworks.paranamer:paranamer + jline:* + org.rocksdb:* + org.eclipse.jetty*:* + org.apache.avro:avro + com.beust:* + net.jodah:* + io.airlift:* + com.yahoo.datasketches:* + io.netty.resolver:* + + org.apache.pulsar:pulsar-client-original @@ -225,10 +259,10 @@ org.apache.pulsar.functions.runtime.shaded.org.apache.distributedlog - + org.inferred org.apache.pulsar.functions.runtime.shaded.org.inferred @@ -246,10 +280,10 @@ org.apache.pulsar.functions.runtime.shaded.dlshade - + net.java.dev.jna org.apache.pulsar.functions.runtime.shaded.net.java.dev.jna @@ -270,6 +304,10 @@ io.prometheus org.apache.pulsar.functions.runtime.shaded.io.prometheus + + io.prometheus.jmx + org.apache.pulsar.functions.runtime.shaded.io.prometheus.jmx + org.apache.zookeeper org.apache.pulsar.functions.runtime.shaded.org.apache.zookeeper @@ -347,18 +385,22 @@ org.apache.pulsar.functions.runtime.shaded.avo.shaded - com.yahoo - org.apache.pulsar.functions.runtime.shaded.com.yahoo + com.yahoo.datasketches + org.apache.pulsar.shaded.com.yahoo.datasketches + + + com.yahoo.sketches + org.apache.pulsar.shaded.com.yahoo.sketches com.beust org.apache.pulsar.functions.runtime.shaded.com.beust - + org.hamcrest org.apache.pulsar.functions.runtime.shaded.org.hamcrest @@ -382,7 +424,11 @@ --> org.asynchttpclient - org.apache.pulsar.shade.org.asynchttpclient + org.apache.pulsar.functions.runtime.shaded.org.asynchttpclient + + + io.airlift + org.apache.pulsar.functions.runtime.shaded.io.airlift diff --git 
a/pulsar-functions/runtime/pom.xml b/pulsar-functions/runtime/pom.xml index f3cbcc21f35f4..cf87a3fec237e 100644 --- a/pulsar-functions/runtime/pom.xml +++ b/pulsar-functions/runtime/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar-functions - 2.9.0-SNAPSHOT + 2.9.3 pulsar-functions-runtime diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/JavaInstanceStarter.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/JavaInstanceStarter.java index 1881b5556008d..36d063f8f558f 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/JavaInstanceStarter.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/JavaInstanceStarter.java @@ -19,6 +19,8 @@ package org.apache.pulsar.functions.runtime; +import static org.apache.pulsar.functions.utils.FunctionCommon.getSinkType; +import static org.apache.pulsar.functions.utils.FunctionCommon.getSourceType; import com.beust.jcommander.JCommander; import com.beust.jcommander.Parameter; import com.beust.jcommander.converters.StringConverter; @@ -33,6 +35,7 @@ import io.prometheus.client.exporter.HTTPServer; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; +import org.apache.pulsar.common.functions.WindowConfig; import org.apache.pulsar.common.nar.NarClassLoader; import org.apache.pulsar.functions.instance.AuthenticationConfig; import org.apache.pulsar.functions.instance.InstanceCache; @@ -45,13 +48,13 @@ import org.apache.pulsar.functions.secretsprovider.ClearTextSecretsProvider; import org.apache.pulsar.functions.secretsprovider.SecretsProvider; import org.apache.pulsar.common.util.Reflections; - import java.lang.reflect.Type; import java.net.InetSocketAddress; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; +import org.apache.pulsar.functions.utils.FunctionCommon; @Slf4j @@ 
-94,6 +97,9 @@ public class JavaInstanceStarter implements AutoCloseable { @Parameter(names = "--tls_trust_cert_path", description = "tls trust cert file path") public String tlsTrustCertFilePath; + @Parameter(names = "--state_storage_impl_class", description = "State Storage Service Implementation class\n", required= false) + public String stateStorageImplClass; + @Parameter(names = "--state_storage_serviceurl", description = "State Storage Service Url\n", required= false) public String stateStorageServiceUrl; @@ -162,6 +168,7 @@ public void start(String[] args, ClassLoader functionInstanceClassLoader, ClassL functionDetailsJsonString = functionDetailsJsonString.substring(0, functionDetailsJsonString.length() - 1); } JsonFormat.parser().merge(functionDetailsJsonString, functionDetailsBuilder); + inferringMissingTypeClassName(functionDetailsBuilder, functionInstanceClassLoader); Function.FunctionDetails functionDetails = functionDetailsBuilder.build(); instanceConfig.setFunctionDetails(functionDetails); instanceConfig.setPort(port); @@ -196,6 +203,7 @@ public void start(String[] args, ClassLoader functionInstanceClassLoader, ClassL RuntimeUtils.registerDefaultCollectors(collectorRegistry); containerFactory = new ThreadRuntimeFactory("LocalRunnerThreadGroup", pulsarServiceUrl, + stateStorageImplClass, stateStorageServiceUrl, AuthenticationConfig.builder().clientAuthenticationPlugin(clientAuthenticationPlugin) .clientAuthenticationParameters(clientAuthenticationParameters).useTls(isTrue(useTls)) @@ -283,6 +291,84 @@ public void close() { } } + private void inferringMissingTypeClassName(Function.FunctionDetails.Builder functionDetailsBuilder, + ClassLoader classLoader) throws ClassNotFoundException { + switch (functionDetailsBuilder.getComponentType()) { + case FUNCTION: + if ((functionDetailsBuilder.hasSource() + && functionDetailsBuilder.getSource().getTypeClassName().isEmpty()) + || (functionDetailsBuilder.hasSink() + && 
functionDetailsBuilder.getSink().getTypeClassName().isEmpty())) { + Map userConfigs = new Gson().fromJson(functionDetailsBuilder.getUserConfig(), + new TypeToken>() { + }.getType()); + boolean isWindowConfigPresent = userConfigs.containsKey(WindowConfig.WINDOW_CONFIG_KEY); + String className = functionDetailsBuilder.getClassName(); + if (isWindowConfigPresent) { + WindowConfig windowConfig = new Gson().fromJson( + (new Gson().toJson(userConfigs.get(WindowConfig.WINDOW_CONFIG_KEY))), + WindowConfig.class); + className = windowConfig.getActualWindowFunctionClassName(); + } + + Class[] typeArgs = FunctionCommon.getFunctionTypes(classLoader.loadClass(className), + isWindowConfigPresent); + if (functionDetailsBuilder.hasSource() + && functionDetailsBuilder.getSource().getTypeClassName().isEmpty() + && typeArgs[0] != null) { + Function.SourceSpec.Builder sourceBuilder = functionDetailsBuilder.getSource().toBuilder(); + sourceBuilder.setTypeClassName(typeArgs[0].getName()); + functionDetailsBuilder.setSource(sourceBuilder.build()); + } + + if (functionDetailsBuilder.hasSink() + && functionDetailsBuilder.getSink().getTypeClassName().isEmpty() + && typeArgs[1] != null) { + Function.SinkSpec.Builder sinkBuilder = functionDetailsBuilder.getSink().toBuilder(); + sinkBuilder.setTypeClassName(typeArgs[1].getName()); + functionDetailsBuilder.setSink(sinkBuilder.build()); + } + } + break; + case SINK: + if ((functionDetailsBuilder.hasSink() + && functionDetailsBuilder.getSink().getTypeClassName().isEmpty())) { + String typeArg = getSinkType(functionDetailsBuilder.getClassName(), classLoader).getName(); + + Function.SinkSpec.Builder sinkBuilder = + Function.SinkSpec.newBuilder(functionDetailsBuilder.getSink()); + sinkBuilder.setTypeClassName(typeArg); + functionDetailsBuilder.setSink(sinkBuilder); + + Function.SourceSpec sourceSpec = functionDetailsBuilder.getSource(); + if (null == sourceSpec || StringUtils.isEmpty(sourceSpec.getTypeClassName())) { + Function.SourceSpec.Builder 
sourceBuilder = Function.SourceSpec.newBuilder(sourceSpec); + sourceBuilder.setTypeClassName(typeArg); + functionDetailsBuilder.setSource(sourceBuilder); + } + } + break; + case SOURCE: + if ((functionDetailsBuilder.hasSource() + && functionDetailsBuilder.getSource().getTypeClassName().isEmpty())) { + String typeArg = getSourceType(functionDetailsBuilder.getClassName(), classLoader).getName(); + + Function.SourceSpec.Builder sourceBuilder = + Function.SourceSpec.newBuilder(functionDetailsBuilder.getSource()); + sourceBuilder.setTypeClassName(typeArg); + functionDetailsBuilder.setSource(sourceBuilder); + + Function.SinkSpec sinkSpec = functionDetailsBuilder.getSink(); + if (null == sinkSpec || StringUtils.isEmpty(sinkSpec.getTypeClassName())) { + Function.SinkSpec.Builder sinkBuilder = Function.SinkSpec.newBuilder(sinkSpec); + sinkBuilder.setTypeClassName(typeArg); + functionDetailsBuilder.setSink(sinkBuilder); + } + } + break; + } + } + class InstanceControlImpl extends InstanceControlGrpc.InstanceControlImplBase { private RuntimeSpawner runtimeSpawner; diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/RuntimeSpawner.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/RuntimeSpawner.java index 471af68c1652c..60c869795079e 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/RuntimeSpawner.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/RuntimeSpawner.java @@ -23,6 +23,7 @@ */ package org.apache.pulsar.functions.runtime; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import java.io.IOException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ScheduledFuture; @@ -81,23 +82,25 @@ public void start() throws Exception { // monitor function runtime to make sure it is running. 
If not, restart the function runtime if (!runtimeFactory.externallyManaged() && instanceLivenessCheckFreqMs > 0) { - processLivenessCheckTimer = InstanceCache.getInstanceCache().getScheduledExecutorService().scheduleAtFixedRate(() -> { - Runtime runtime = RuntimeSpawner.this.runtime; - if (runtime != null && !runtime.isAlive()) { - log.error("{}/{}/{} Function Container is dead with following exception. Restarting.", details.getTenant(), - details.getNamespace(), details.getName(), runtime.getDeathException()); - // Just for the sake of sanity, just destroy the runtime - try { - runtime.stop(); - runtimeDeathException = runtime.getDeathException(); - runtime.start(); - } catch (Exception e) { - log.error("{}/{}/{}-{} Function Restart failed", details.getTenant(), - details.getNamespace(), details.getName(), e, e); - } - numRestarts++; - } - }, instanceLivenessCheckFreqMs, instanceLivenessCheckFreqMs, TimeUnit.MILLISECONDS); + processLivenessCheckTimer = InstanceCache.getInstanceCache().getScheduledExecutorService() + .scheduleAtFixedRate(catchingAndLoggingThrowables(() -> { + Runtime runtime = RuntimeSpawner.this.runtime; + if (runtime != null && !runtime.isAlive()) { + log.error("{}/{}/{} Function Container is dead with following exception. 
Restarting.", + details.getTenant(), + details.getNamespace(), details.getName(), runtime.getDeathException()); + // Just for the sake of sanity, just destroy the runtime + try { + runtime.stop(); + runtimeDeathException = runtime.getDeathException(); + runtime.start(); + } catch (Exception e) { + log.error("{}/{}/{}-{} Function Restart failed", details.getTenant(), + details.getNamespace(), details.getName(), e, e); + } + numRestarts++; + } + }), instanceLivenessCheckFreqMs, instanceLivenessCheckFreqMs, TimeUnit.MILLISECONDS); } } diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/RuntimeUtils.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/RuntimeUtils.java index 107d5cff9bf11..4e4e2dcc77785 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/RuntimeUtils.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/RuntimeUtils.java @@ -173,6 +173,9 @@ public static List getGoInstanceCmd(InstanceConfig instanceConfig, if (instanceConfig.getFunctionDetails().getProcessingGuarantees() != null) { goInstanceConfig.setProcessingGuarantees(instanceConfig.getFunctionDetails().getProcessingGuaranteesValue()); } + if (instanceConfig.getFunctionDetails().getRuntime() != null) { + goInstanceConfig.setRuntime(instanceConfig.getFunctionDetails().getRuntimeValue()); + } if (instanceConfig.getFunctionDetails().getSecretsMap() != null) { goInstanceConfig.setSecretsMap(instanceConfig.getFunctionDetails().getSecretsMap()); } @@ -315,6 +318,9 @@ public static List getCmd(InstanceConfig instanceConfig, "%s-%s", instanceConfig.getFunctionDetails().getName(), shardId)); + + args.add("-Dio.netty.tryReflectionSetAccessible=true"); + if (!isEmpty(instanceConfig.getFunctionDetails().getRuntimeFlags())) { for (String runtimeFlagArg : splitRuntimeArgs(instanceConfig.getFunctionDetails().getRuntimeFlags())) { args.add(runtimeFlagArg); diff --git 
a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntime.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntime.java index a483bd07fbd00..c443c4b2f5419 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntime.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntime.java @@ -149,6 +149,7 @@ public class KubernetesRuntime implements Runtime { private int percentMemoryPadding; private double cpuOverCommitRatio; private double memoryOverCommitRatio; + private int gracePeriodSeconds; private final Optional functionAuthDataCacheProvider; private final AuthenticationConfig authConfig; private Integer grpcPort; @@ -186,6 +187,7 @@ public class KubernetesRuntime implements Runtime { int percentMemoryPadding, double cpuOverCommitRatio, double memoryOverCommitRatio, + int gracePeriodSeconds, Optional functionAuthDataCacheProvider, boolean authenticationEnabled, Integer grpcPort, @@ -212,6 +214,7 @@ public class KubernetesRuntime implements Runtime { this.percentMemoryPadding = percentMemoryPadding; this.cpuOverCommitRatio = cpuOverCommitRatio; this.memoryOverCommitRatio = memoryOverCommitRatio; + this.gracePeriodSeconds = gracePeriodSeconds; this.authenticationEnabled = authenticationEnabled; this.manifestCustomizer = manifestCustomizer; this.functionInstanceClassPath = functionInstanceClassPath; @@ -424,7 +427,11 @@ public void onSuccess(InstanceCommunication.MetricsData t) { @Override public String getPrometheusMetrics() throws IOException { - return RuntimeUtils.getPrometheusMetrics(metricsPort); + if (metricsPort != null) { + return RuntimeUtils.getPrometheusMetrics(metricsPort); + } else { + return null; + } } @Override @@ -567,7 +574,7 @@ private void submitStatefulSet() throws Exception { public void deleteStatefulSet() throws InterruptedException { 
String statefulSetName = createJobName(instanceConfig.getFunctionDetails(), this.jobName); final V1DeleteOptions options = new V1DeleteOptions(); - options.setGracePeriodSeconds(5L); + options.setGracePeriodSeconds((long)gracePeriodSeconds); options.setPropagationPolicy("Foreground"); String fqfn = FunctionCommon.getFullyQualifiedName(instanceConfig.getFunctionDetails()); @@ -583,8 +590,8 @@ public void deleteStatefulSet() throws InterruptedException { response = appsClient.deleteNamespacedStatefulSetCall( statefulSetName, jobNamespace, null, null, - 5, null, "Foreground", - null, null) + gracePeriodSeconds, null, "Foreground", + options, null) .execute(); } catch (ApiException e) { // if already deleted @@ -735,7 +742,7 @@ public void deleteService() throws InterruptedException { serviceName, jobNamespace, null, null, 0, null, - "Foreground", null, null).execute(); + "Foreground", options, null).execute(); } catch (ApiException e) { // if already deleted if (e.getCode() == HTTP_NOT_FOUND) { @@ -972,10 +979,14 @@ V1StatefulSet createStatefulSet() { } private Map getPrometheusAnnotations() { - final Map annotations = new HashMap<>(); - annotations.put("prometheus.io/scrape", "true"); - annotations.put("prometheus.io/port", String.valueOf(metricsPort)); - return annotations; + if (metricsPort != null) { + final Map annotations = new HashMap<>(); + annotations.put("prometheus.io/scrape", "true"); + annotations.put("prometheus.io/port", String.valueOf(metricsPort)); + return annotations; + } else { + return Collections.emptyMap(); + } } private Map getLabels(Function.FunctionDetails functionDetails) { @@ -1127,20 +1138,23 @@ V1Container getFunctionContainer(List instanceCommand, Function.Resource private List getFunctionContainerPorts() { List ports = new ArrayList<>(); + ports.add(getGRPCPort()); + ports.add(getPrometheusPort()); + return ports; + } + + private V1ContainerPort getGRPCPort() { final V1ContainerPort port = new V1ContainerPort(); port.setName("grpc"); 
port.setContainerPort(grpcPort); - ports.add(port); - return ports; + return port; } - private List getPrometheusContainerPorts() { - List ports = new ArrayList<>(); + private V1ContainerPort getPrometheusPort() { final V1ContainerPort port = new V1ContainerPort(); port.setName("prometheus"); port.setContainerPort(metricsPort); - ports.add(port); - return ports; + return port; } public static String createJobName(Function.FunctionDetails functionDetails, String jobName) { diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactory.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactory.java index 2a47da66ae742..4b2c7e723259c 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactory.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactory.java @@ -103,6 +103,7 @@ public class KubernetesRuntimeFactory implements RuntimeFactory { private String narExtractionDirectory; private String functionInstanceClassPath; private String downloadDirectory; + private int gracePeriodSeconds; @ToString.Exclude @EqualsAndHashCode.Exclude @@ -200,6 +201,7 @@ public void initialize(WorkerConfig workerConfig, AuthenticationConfig authentic this.percentMemoryPadding = factoryConfig.getPercentMemoryPadding(); this.cpuOverCommitRatio = factoryConfig.getCpuOverCommitRatio(); this.memoryOverCommitRatio = factoryConfig.getMemoryOverCommitRatio(); + this.gracePeriodSeconds = factoryConfig.getGracePeriodSeconds(); this.pulsarServiceUrl = StringUtils.isEmpty(factoryConfig.getPulsarServiceUrl()) ? 
workerConfig.getPulsarServiceUrl() : factoryConfig.getPulsarServiceUrl(); this.pulsarAdminUrl = StringUtils.isEmpty(factoryConfig.getPulsarAdminUrl()) @@ -288,6 +290,11 @@ public KubernetesRuntime createContainer(InstanceConfig instanceConfig, String c String overriddenNamespace = manifestCustomizer.map((customizer) -> customizer.customizeNamespace(instanceConfig.getFunctionDetails(), jobNamespace)).orElse(jobNamespace); String overriddenName = manifestCustomizer.map((customizer) -> customizer.customizeName(instanceConfig.getFunctionDetails(), jobName)).orElse(jobName); + // pass metricsPort configured in functionRuntimeFactoryConfigs.metricsPort in functions_worker.yml + if (metricsPort != null) { + instanceConfig.setMetricsPort(metricsPort); + } + return new KubernetesRuntime( appsClient, coreClient, @@ -318,6 +325,7 @@ public KubernetesRuntime createContainer(InstanceConfig instanceConfig, String c percentMemoryPadding, cpuOverCommitRatio, memoryOverCommitRatio, + gracePeriodSeconds, authProvider, authenticationEnabled, grpcPort, @@ -348,7 +356,7 @@ public void setupClient() throws Exception { if (k8Uri == null) { log.info("k8Uri is null thus going by defaults"); ApiClient cli; - if (submittingInsidePod) { + if (submittingInsidePod != null && submittingInsidePod) { log.info("Looks like we are inside a k8 pod ourselves. 
Initializing as cluster"); cli = Config.fromCluster(); } else { diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactoryConfig.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactoryConfig.java index a3ef41895aa86..e3b758f479c1c 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactoryConfig.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactoryConfig.java @@ -161,5 +161,11 @@ public class KubernetesRuntimeFactoryConfig { doc = "The classpath where function instance files stored" ) private String functionInstanceClassPath = ""; + @FieldContext( + doc = "The duration in seconds before the StatefulSet deleted on function stop/restart. " + + "Value must be non-negative integer. The value zero indicates delete immediately. " + + "Default is 5 seconds." 
+ ) + protected int gracePeriodSeconds = 5; } diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/process/ProcessRuntime.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/process/ProcessRuntime.java index 7d585bcd77b22..6135b6b680688 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/process/ProcessRuntime.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/process/ProcessRuntime.java @@ -19,6 +19,7 @@ package org.apache.pulsar.functions.runtime.process; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; @@ -180,16 +181,17 @@ public void start() { .build(); stub = InstanceControlGrpc.newFutureStub(channel); - timer = InstanceCache.getInstanceCache().getScheduledExecutorService().scheduleAtFixedRate(() -> { - CompletableFuture result = healthCheck(); - try { - result.get(); - } catch (Exception e) { - log.error("Health check failed for {}-{}", - instanceConfig.getFunctionDetails().getName(), - instanceConfig.getInstanceId(), e); - } - }, expectedHealthCheckInterval, expectedHealthCheckInterval, TimeUnit.SECONDS); + timer = InstanceCache.getInstanceCache().getScheduledExecutorService() + .scheduleAtFixedRate(catchingAndLoggingThrowables(() -> { + CompletableFuture result = healthCheck(); + try { + result.get(); + } catch (Exception e) { + log.error("Health check failed for {}-{}", + instanceConfig.getFunctionDetails().getName(), + instanceConfig.getInstanceId(), e); + } + }), expectedHealthCheckInterval, expectedHealthCheckInterval, TimeUnit.SECONDS); } } diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntime.java 
b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntime.java index b6dd019140e76..950f48bdb0e7c 100644 --- a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntime.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntime.java @@ -19,6 +19,7 @@ package org.apache.pulsar.functions.runtime.thread; +import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Arrays; @@ -26,13 +27,13 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; -import io.prometheus.client.CollectorRegistry; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.api.ClientBuilder; import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.common.nar.FileUtils; import org.apache.pulsar.functions.instance.InstanceConfig; import org.apache.pulsar.functions.instance.InstanceUtils; import org.apache.pulsar.functions.instance.stats.FunctionCollectorRegistry; @@ -66,6 +67,7 @@ public class ThreadRuntime implements Runtime { private ClientBuilder clientBuilder; private PulsarClient pulsarClient; private PulsarAdmin pulsarAdmin; + private String stateStorageImplClass; private String stateStorageServiceUrl; private SecretsProvider secretsProvider; private FunctionCollectorRegistry collectorRegistry; @@ -79,6 +81,7 @@ public class ThreadRuntime implements Runtime { PulsarClient client, ClientBuilder clientBuilder, PulsarAdmin pulsarAdmin, + String stateStorageImplClass, String stateStorageServiceUrl, SecretsProvider secretsProvider, FunctionCollectorRegistry collectorRegistry, @@ -95,6 +98,7 @@ public class ThreadRuntime implements Runtime { this.clientBuilder = clientBuilder; this.pulsarClient = client; this.pulsarAdmin = pulsarAdmin; + 
this.stateStorageImplClass = stateStorageImplClass; this.stateStorageServiceUrl = stateStorageServiceUrl; this.secretsProvider = secretsProvider; this.collectorRegistry = collectorRegistry; @@ -132,14 +136,24 @@ private static ClassLoader loadJars(String jarFile, return Thread.currentThread().getContextClassLoader(); } ClassLoader fnClassLoader; - try { - log.info("Load JAR: {}", jarFile); - // Let's first try to treat it as a nar archive - fnCache.registerFunctionInstanceWithArchive( - instanceConfig.getFunctionId(), - instanceConfig.getInstanceName(), - jarFile, narExtractionDirectory); - } catch (FileNotFoundException e) { + boolean loadedAsNar = false; + if (FileUtils.mayBeANarArchive(new File(jarFile))) { + try { + log.info("Trying Loading file as NAR file: {}", jarFile); + // Let's first try to treat it as a nar archive + fnCache.registerFunctionInstanceWithArchive( + instanceConfig.getFunctionId(), + instanceConfig.getInstanceName(), + jarFile, narExtractionDirectory); + loadedAsNar = true; + } catch (FileNotFoundException e) { + // this is usually like + // java.io.FileNotFoundException: /tmp/pulsar-nar/xxx.jar-unpacked/xxxxx/META-INF/MANIFEST.MF' + log.error("The file {} does not look like a .nar file", jarFile, e.toString()); + } + } + if (!loadedAsNar) { + log.info("Load file as simple JAR file: {}", jarFile); // create the function class loader fnCache.registerFunctionInstance( instanceConfig.getFunctionId(), @@ -174,6 +188,7 @@ public void start() throws Exception { clientBuilder, pulsarClient, pulsarAdmin, + stateStorageImplClass, stateStorageServiceUrl, secretsProvider, collectorRegistry, diff --git a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntimeFactory.java b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntimeFactory.java index 864a067c1be2c..1e8c96a7ab66d 100644 --- 
a/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntimeFactory.java +++ b/pulsar-functions/runtime/src/main/java/org/apache/pulsar/functions/runtime/thread/ThreadRuntimeFactory.java @@ -63,6 +63,7 @@ public class ThreadRuntimeFactory implements RuntimeFactory { private ClientBuilder clientBuilder; private PulsarClient pulsarClient; private PulsarAdmin pulsarAdmin; + private String stateStorageImplClass; private String storageServiceUrl; private SecretsProvider defaultSecretsProvider; private FunctionCollectorRegistry collectorRegistry; @@ -76,21 +77,27 @@ public class ThreadRuntimeFactory implements RuntimeFactory { * This constructor is used by other runtimes (e.g. ProcessRuntime and KubernetesRuntime) that rely on ThreadRuntime to actually run an instance of the function. * When used by other runtimes, the arguments such as secretsProvider and rootClassLoader will be provided. */ - public ThreadRuntimeFactory(String threadGroupName, String pulsarServiceUrl, String storageServiceUrl, + public ThreadRuntimeFactory(String threadGroupName, String pulsarServiceUrl, + String stateStorageImplClass, + String storageServiceUrl, AuthenticationConfig authConfig, SecretsProvider secretsProvider, FunctionCollectorRegistry collectorRegistry, String narExtractionDirectory, ClassLoader rootClassLoader, boolean exposePulsarAdminClientEnabled, String pulsarWebServiceUrl) throws Exception { initialize(threadGroupName, Optional.empty(), pulsarServiceUrl, authConfig, - storageServiceUrl, null, secretsProvider, collectorRegistry, narExtractionDirectory, + stateStorageImplClass, storageServiceUrl, null, secretsProvider, collectorRegistry, + narExtractionDirectory, rootClassLoader, exposePulsarAdminClientEnabled, pulsarWebServiceUrl, Optional.empty()); } - private void initialize(String threadGroupName, Optional memoryLimit, String pulsarServiceUrl, AuthenticationConfig authConfig, String storageServiceUrl, + private void initialize(String 
threadGroupName, Optional memoryLimit, + String pulsarServiceUrl, AuthenticationConfig authConfig, String stateStorageImplClass, + String storageServiceUrl, SecretsProviderConfigurator secretsProviderConfigurator, SecretsProvider secretsProvider, FunctionCollectorRegistry collectorRegistry, String narExtractionDirectory, ClassLoader rootClassLoader, boolean exposePulsarAdminClientEnabled, - String pulsarWebServiceUrl, Optional connectorsManager) throws PulsarClientException { + String pulsarWebServiceUrl, Optional connectorsManager) + throws PulsarClientException { if (rootClassLoader == null) { rootClassLoader = Thread.currentThread().getContextClassLoader(); @@ -104,6 +111,7 @@ private void initialize(String threadGroupName, Optional webServiceTlsProtocols = new TreeSet<>(); + + @FieldContext( + category = CATEGORY_WORKER_SECURITY, + doc = "Specify the tls cipher the proxy's web service will use to negotiate during TLS Handshake.\n\n" + + "Example:- [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]" + ) + private Set webServiceTlsCiphers = new TreeSet<>(); + @FieldContext( category = CATEGORY_WORKER_SECURITY, doc = "Enforce authentication" diff --git a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/RuntimeUtilsTest.java b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/RuntimeUtilsTest.java index f8bbbc4a883af..bc00776c78ebd 100644 --- a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/RuntimeUtilsTest.java +++ b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/RuntimeUtilsTest.java @@ -99,6 +99,7 @@ public void getGoInstanceCmd(boolean k8sRuntime) throws IOException { .setName("go-func") .setLogTopic("go-func-log") .setProcessingGuarantees(Function.ProcessingGuarantees.ATLEAST_ONCE) + .setRuntime(Function.FunctionDetails.Runtime.GO) .setSecretsMap(secretsMap.toJSONString()) .setParallelism(1) .setSource(sources) @@ -137,7 +138,7 @@ public void 
getGoInstanceCmd(boolean k8sRuntime) throws IOException { Assert.assertEquals(goInstanceConfig.get("autoAck"), true); Assert.assertEquals(goInstanceConfig.get("regexPatternSubscription"), false); Assert.assertEquals(goInstanceConfig.get("pulsarServiceURL"), "pulsar://localhost:6650"); - Assert.assertEquals(goInstanceConfig.get("runtime"), 0); + Assert.assertEquals(goInstanceConfig.get("runtime"), 3); Assert.assertEquals(goInstanceConfig.get("cpu"), 2.0); Assert.assertEquals(goInstanceConfig.get("funcID"), "func-7734"); Assert.assertEquals(goInstanceConfig.get("funcVersion"), "1.0.0"); diff --git a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactoryTest.java b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactoryTest.java index 1b8946b64b41d..dc0119cab4625 100644 --- a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactoryTest.java +++ b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeFactoryTest.java @@ -333,7 +333,7 @@ public void testValidateResourcesGranularityAndProportion() throws Exception { "Per instance ram requested, 0, for function should be positive and a multiple of the granularity, 1000"); } - public void testAuthProvider(Optional authProvider) throws Exception { + private void testAuthProvider(Optional authProvider) throws Exception { factory = createKubernetesRuntimeFactory(null, null, null, null, false, authProvider, Optional.empty()); } diff --git a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeTest.java b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeTest.java index 9f064f511f050..be9c52cb33ffb 100644 --- 
a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeTest.java +++ b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/kubernetes/KubernetesRuntimeTest.java @@ -29,12 +29,14 @@ import io.kubernetes.client.custom.Quantity; import io.kubernetes.client.openapi.models.V1Container; import io.kubernetes.client.openapi.models.V1PodSpec; +import io.kubernetes.client.openapi.models.V1PodTemplateSpec; import io.kubernetes.client.openapi.models.V1ResourceRequirements; import io.kubernetes.client.openapi.models.V1Service; import io.kubernetes.client.openapi.models.V1StatefulSet; import io.kubernetes.client.openapi.models.V1Toleration; import org.apache.commons.lang.StringUtils; import org.apache.pulsar.common.util.ObjectMapperFactory; +import org.apache.pulsar.functions.instance.AuthenticationConfig; import org.apache.pulsar.functions.instance.InstanceConfig; import org.apache.pulsar.functions.proto.Function; import org.apache.pulsar.functions.proto.Function.ConsumerSpec; @@ -42,6 +44,7 @@ import org.apache.pulsar.functions.runtime.RuntimeCustomizer; import org.apache.pulsar.functions.runtime.thread.ThreadRuntime; import org.apache.pulsar.functions.secretsprovider.ClearTextSecretsProvider; +import org.apache.pulsar.functions.secretsproviderconfigurator.DefaultSecretsProviderConfigurator; import org.apache.pulsar.functions.secretsproviderconfigurator.SecretsProviderConfigurator; import org.apache.pulsar.functions.utils.FunctionCommon; import org.apache.pulsar.functions.worker.ConnectorsManager; @@ -66,6 +69,7 @@ import static org.powermock.api.mockito.PowerMockito.doNothing; import static org.powermock.api.mockito.PowerMockito.spy; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertThrows; import static org.testng.Assert.assertTrue; @@ -358,7 +362,8 @@ public void 
testResources() throws Exception { testResources(1.0 / 1.5, 1000, 1.3, 1.0); } - public void testResources(double userCpuRequest, long userMemoryRequest, double cpuOverCommitRatio, double memoryOverCommitRatio) throws Exception { + private void testResources(double userCpuRequest, long userMemoryRequest, double cpuOverCommitRatio, + double memoryOverCommitRatio) throws Exception { Function.Resources resources = Function.Resources.newBuilder() .setRam(userMemoryRequest).setCpu(userCpuRequest).setDisk(10000L).build(); @@ -396,14 +401,14 @@ private void verifyJavaInstance(InstanceConfig config, String depsDir, boolean s if (null != depsDir) { extraDepsEnv = " -Dpulsar.functions.extra.dependencies.dir=" + depsDir; classpath = classpath + ":" + depsDir + "/*"; - totalArgs = 39; + totalArgs = 40; portArg = 26; metricsPortArg = 28; } else { extraDepsEnv = ""; portArg = 25; metricsPortArg = 27; - totalArgs = 38; + totalArgs = 39; } if (secretsAttached) { totalArgs += 4; @@ -433,7 +438,7 @@ private void verifyJavaInstance(InstanceConfig config, String depsDir, boolean s + " -Dlog4j.configurationFile=kubernetes_instance_log4j2.xml " + "-Dpulsar.function.log.dir=" + logDirectory + "/" + FunctionCommon.getFullyQualifiedName(config.getFunctionDetails()) + " -Dpulsar.function.log.file=" + config.getFunctionDetails().getName() + "-$SHARD_ID" - + " -Xmx" + String.valueOf(RESOURCES.getRam()) + + " -Dio.netty.tryReflectionSetAccessible=true -Xmx" + String.valueOf(RESOURCES.getRam()) + " org.apache.pulsar.functions.instance.JavaInstanceMain" + " --jar " + jarLocation + " --instance_id " + "$SHARD_ID" + " --function_id " + config.getFunctionId() @@ -875,7 +880,7 @@ private void verifyGolangInstance(InstanceConfig config) throws Exception { assertEquals(goInstanceConfig.get("autoAck"), false); assertEquals(goInstanceConfig.get("regexPatternSubscription"), false); assertEquals(goInstanceConfig.get("pulsarServiceURL"), pulsarServiceUrl); - assertEquals(goInstanceConfig.get("runtime"), 
0); + assertEquals(goInstanceConfig.get("runtime"), 3); assertEquals(goInstanceConfig.get("cpu"), 1.0); assertEquals(goInstanceConfig.get("funcVersion"), "1.0"); assertEquals(goInstanceConfig.get("disk"), 10000); @@ -1144,4 +1149,52 @@ public void testCustomKubernetesDownloadCommandsWithDownloadDirectoryDefined() t String containerCommand = spec.getSpec().getTemplate().getSpec().getContainers().get(0).getCommand().get(2); assertTrue(containerCommand.contains(expectedDownloadCommand)); } + + @Test + public void shouldUseConfiguredMetricsPort() throws Exception { + assertMetricsPortConfigured(Collections.singletonMap("metricsPort", 12345), 12345); + } + + @Test + public void shouldUseDefaultMetricsPortWhenMetricsPortIsntSet() throws Exception { + assertMetricsPortConfigured(Collections.emptyMap(), 9094); + } + + @Test + public void shouldNotAddPrometheusAnnotationIfMetricsPortIsSetToEmpty() throws Exception { + assertMetricsPortConfigured(Collections.singletonMap("metricsPort", ""), -1); + } + + private void assertMetricsPortConfigured(Map functionRuntimeFactoryConfigs, + int expectedPort) throws Exception { + KubernetesRuntimeFactory kubernetesRuntimeFactory = new KubernetesRuntimeFactory(); + WorkerConfig workerConfig = new WorkerConfig(); + workerConfig.setFunctionRuntimeFactoryClassName(KubernetesRuntimeFactory.class.getName()); + workerConfig.setFunctionRuntimeFactoryConfigs(functionRuntimeFactoryConfigs); + AuthenticationConfig authenticationConfig = AuthenticationConfig.builder().build(); + kubernetesRuntimeFactory.initialize(workerConfig, authenticationConfig, new DefaultSecretsProviderConfigurator(), Mockito.mock(ConnectorsManager.class), Optional.empty(), Optional.empty()); + InstanceConfig config = createJavaInstanceConfig(FunctionDetails.Runtime.JAVA, true); + KubernetesRuntime container = kubernetesRuntimeFactory.createContainer(config, userJarFile, userJarFile, 30l); + V1PodTemplateSpec template = container.createStatefulSet().getSpec().getTemplate(); + 
Map annotations = + template.getMetadata().getAnnotations(); + if (expectedPort != -1) { + // metrics port should be passed to k8s annotation for prometheus scraping + assertEquals(annotations.get("prometheus.io/port"), String.valueOf(expectedPort)); + // scraping annotation should exist + assertEquals(annotations.get("prometheus.io/scrape"), "true"); + + // metrics port should be passed to JavaInstanceStarter with --metrics_port argument + assertTrue(container.getProcessArgs().stream().collect(Collectors.joining(" ")) + .contains("--metrics_port " + expectedPort)); + } else { + // No prometheus annotations should exist + assertFalse(annotations.containsKey("prometheus.io/scrape")); + assertFalse(annotations.containsKey("prometheus.io/port")); + // metrics will be started on random port when the port isn't specified + // check that "--metrics_port 0" argument is passed + assertTrue(container.getProcessArgs().stream().collect(Collectors.joining(" ")) + .contains("--metrics_port 0")); + } + } } diff --git a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/process/ProcessRuntimeTest.java b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/process/ProcessRuntimeTest.java index 4751ecd7778d1..6cddf3822d01a 100644 --- a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/process/ProcessRuntimeTest.java +++ b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/runtime/process/ProcessRuntimeTest.java @@ -298,7 +298,7 @@ private void verifyJavaInstance(InstanceConfig config, Path depsDir, String webS String extraDepsEnv; int portArg; int metricsPortArg; - int totalArgCount = 41; + int totalArgCount = 42; if (webServiceUrl != null && config.isExposePulsarAdminClientEnabled()) { totalArgCount += 3; } @@ -306,13 +306,13 @@ private void verifyJavaInstance(InstanceConfig config, Path depsDir, String webS assertEquals(args.size(), totalArgCount); extraDepsEnv = " 
-Dpulsar.functions.extra.dependencies.dir=" + depsDir.toString(); classpath = classpath + ":" + depsDir + "/*"; - portArg = 24; - metricsPortArg = 26; + portArg = 25; + metricsPortArg = 27; } else { assertEquals(args.size(), totalArgCount-1); extraDepsEnv = ""; - portArg = 23; - metricsPortArg = 25; + portArg = 24; + metricsPortArg = 26; } if (webServiceUrl != null && config.isExposePulsarAdminClientEnabled()) { portArg += 3; @@ -328,6 +328,7 @@ private void verifyJavaInstance(InstanceConfig config, Path depsDir, String webS + " -Dlog4j.configurationFile=java_instance_log4j2.xml " + "-Dpulsar.function.log.dir=" + logDirectory + "/functions/" + FunctionCommon.getFullyQualifiedName(config.getFunctionDetails()) + " -Dpulsar.function.log.file=" + config.getFunctionDetails().getName() + "-" + config.getInstanceId() + + " -Dio.netty.tryReflectionSetAccessible=true" + " org.apache.pulsar.functions.instance.JavaInstanceMain" + " --jar " + userJarFile + " --instance_id " + config.getInstanceId() + " --function_id " + config.getFunctionId() diff --git a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/worker/WorkerApiV2ResourceConfigTest.java b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/worker/WorkerApiV2ResourceConfigTest.java index 6cfaea9e2427a..bf45dd958c823 100644 --- a/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/worker/WorkerApiV2ResourceConfigTest.java +++ b/pulsar-functions/runtime/src/test/java/org/apache/pulsar/functions/worker/WorkerApiV2ResourceConfigTest.java @@ -25,10 +25,11 @@ import static org.testng.Assert.assertTrue; import java.net.URL; +import java.util.Locale; +import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.pulsar.functions.auth.KubernetesSecretsTokenAuthProvider; import org.apache.pulsar.functions.runtime.kubernetes.KubernetesRuntimeFactory; -import org.apache.pulsar.functions.worker.WorkerConfig; import org.testng.annotations.Test; /** @@ -121,4 +122,19 @@ public 
void testLoadResourceRestrictionsConfig() throws Exception { assertTrue(newK8SWc.isFunctionInstanceResourceChangeInLockStep()); } + + @Test + public void testPasswordsNotLeakedOnToString() throws Exception { + URL yamlUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); + WorkerConfig wc = WorkerConfig.load(yamlUrl.toURI().getPath()); + assertFalse(wc.toString().toLowerCase(Locale.ROOT).contains("password"), "Stringified config must not contain password"); + } + + @Test + public void testPasswordsPresentOnObjectMapping() throws Exception { + URL yamlUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); + WorkerConfig wc = WorkerConfig.load(yamlUrl.toURI().getPath()); + assertTrue((new ObjectMapper().writeValueAsString(wc)).toLowerCase(Locale.ROOT).contains("password"), + "ObjectMapper output must include passwords for proper serialization"); + } } diff --git a/pulsar-functions/secrets/pom.xml b/pulsar-functions/secrets/pom.xml index 6c56b59c9702c..062347816d9fd 100644 --- a/pulsar-functions/secrets/pom.xml +++ b/pulsar-functions/secrets/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-functions - 2.9.0-SNAPSHOT + 2.9.3 pulsar-functions-secrets diff --git a/pulsar-functions/secrets/src/test/java/org/apache/pulsar/functions/secretsprovider/EnvironmentBasedSecretsProviderTest.java b/pulsar-functions/secrets/src/test/java/org/apache/pulsar/functions/secretsprovider/EnvironmentBasedSecretsProviderTest.java index 1fc76b337a5fd..22ef2dd9e6093 100644 --- a/pulsar-functions/secrets/src/test/java/org/apache/pulsar/functions/secretsprovider/EnvironmentBasedSecretsProviderTest.java +++ b/pulsar-functions/secrets/src/test/java/org/apache/pulsar/functions/secretsprovider/EnvironmentBasedSecretsProviderTest.java @@ -21,10 +21,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; - -import java.lang.reflect.Field; -import java.util.Map; - +import com.github.stefanbirkner.systemlambda.SystemLambda; 
import org.testng.annotations.Test; public class EnvironmentBasedSecretsProviderTest { @@ -32,38 +29,8 @@ public class EnvironmentBasedSecretsProviderTest { public void testConfigValidation() throws Exception { EnvironmentBasedSecretsProvider provider = new EnvironmentBasedSecretsProvider(); assertNull(provider.provideSecret("mySecretName", "Ignored")); - injectEnvironmentVariable("mySecretName", "SecretValue"); - assertEquals(provider.provideSecret("mySecretName", "Ignored"), "SecretValue"); - } - - private static void injectEnvironmentVariable(String key, String value) - throws Exception { - - Class processEnvironment = Class.forName("java.lang.ProcessEnvironment"); - - Field unmodifiableMapField = getAccessibleField(processEnvironment, "theUnmodifiableEnvironment"); - Object unmodifiableMap = unmodifiableMapField.get(null); - injectIntoUnmodifiableMap(key, value, unmodifiableMap); - - Field mapField = getAccessibleField(processEnvironment, "theEnvironment"); - Map map = (Map) mapField.get(null); - map.put(key, value); - } - - private static Field getAccessibleField(Class clazz, String fieldName) - throws NoSuchFieldException { - - Field field = clazz.getDeclaredField(fieldName); - field.setAccessible(true); - return field; - } - - private static void injectIntoUnmodifiableMap(String key, String value, Object map) - throws ReflectiveOperationException { - - Class unmodifiableMap = Class.forName("java.util.Collections$UnmodifiableMap"); - Field field = getAccessibleField(unmodifiableMap, "m"); - Object obj = field.get(map); - ((Map) obj).put(key, value); + SystemLambda.withEnvironmentVariable("mySecretName", "SecretValue").execute(() -> { + assertEquals(provider.provideSecret("mySecretName", "Ignored"), "SecretValue"); + }); } } diff --git a/pulsar-functions/src/test/resources/test_worker_config.yml b/pulsar-functions/src/test/resources/test_worker_config.yml index 4614ca3cfd1c2..f0ecf2bd71bc6 100644 --- a/pulsar-functions/src/test/resources/test_worker_config.yml 
+++ b/pulsar-functions/src/test/resources/test_worker_config.yml @@ -23,4 +23,7 @@ pulsarServiceUrl: pulsar://localhost:6650 functionMetadataTopicName: test-function-metadata-topic numFunctionPackageReplicas: 3 maxPendingAsyncRequests: 200 +properties: + # Fake Bookkeeper Client config to be applied to the DLog Bookkeeper Client + bookkeeper_testKey: "fakeValue" diff --git a/pulsar-functions/utils/pom.xml b/pulsar-functions/utils/pom.xml index 5611acd316485..27713485015fa 100644 --- a/pulsar-functions/utils/pom.xml +++ b/pulsar-functions/utils/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar-functions - 2.9.0-SNAPSHOT + 2.9.3 pulsar-functions-utils diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionCommon.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionCommon.java index a13695e6fbcc6..f72814df9905c 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionCommon.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionCommon.java @@ -382,97 +382,114 @@ public static ClassLoader getClassLoaderFromPackage( String narExtractionDirectory) { String connectorClassName = className; ClassLoader jarClassLoader = null; + boolean keepJarClassLoader = false; ClassLoader narClassLoader = null; + boolean keepNarClassLoader = false; Exception jarClassLoaderException = null; Exception narClassLoaderException = null; try { - jarClassLoader = ClassLoaderUtils.extractClassLoader(packageFile); - } catch (Exception e) { - jarClassLoaderException = e; - } - try { - narClassLoader = FunctionCommon.extractNarClassLoader(packageFile, narExtractionDirectory); - } catch (Exception e) { - narClassLoaderException = e; - } - - // if connector class name is not provided, we can only try to load archive as a NAR - if (isEmpty(connectorClassName)) { - if (narClassLoader == null) { - throw new IllegalArgumentException(String.format("%s package does not 
have the correct format. " + - "Pulsar cannot determine if the package is a NAR package or JAR package. " + - "%s classname is not provided and attempts to load it as a NAR package produced the following error.", - capFirstLetter(componentType), capFirstLetter(componentType)), - narClassLoaderException); - } try { - if (componentType == org.apache.pulsar.functions.proto.Function.FunctionDetails.ComponentType.SOURCE) { - connectorClassName = ConnectorUtils.getIOSourceClass((NarClassLoader) narClassLoader); - } else { - connectorClassName = ConnectorUtils.getIOSinkClass((NarClassLoader) narClassLoader); - } - } catch (IOException e) { - throw new IllegalArgumentException(String.format("Failed to extract %s class from archive", - componentType.toString().toLowerCase()), e); + jarClassLoader = ClassLoaderUtils.extractClassLoader(packageFile); + } catch (Exception e) { + jarClassLoaderException = e; } - try { - narClassLoader.loadClass(connectorClassName); - return narClassLoader; - } catch (ClassNotFoundException | NoClassDefFoundError e) { - throw new IllegalArgumentException( - String.format("%s class %s must be in class path", capFirstLetter(componentType), connectorClassName), e); + narClassLoader = FunctionCommon.extractNarClassLoader(packageFile, narExtractionDirectory); + } catch (Exception e) { + narClassLoaderException = e; } - } else { - // if connector class name is provided, we need to try to load it as a JAR and as a NAR. - if (jarClassLoader != null) { + // if connector class name is not provided, we can only try to load archive as a NAR + if (isEmpty(connectorClassName)) { + if (narClassLoader == null) { + throw new IllegalArgumentException(String.format("%s package does not have the correct format. " + + "Pulsar cannot determine if the package is a NAR package or JAR package. 
" + + "%s classname is not provided and attempts to load it as a NAR package produced " + + "the following error.", + capFirstLetter(componentType), capFirstLetter(componentType)), + narClassLoaderException); + } try { - jarClassLoader.loadClass(connectorClassName); - return jarClassLoader; - } catch (ClassNotFoundException | NoClassDefFoundError e) { - // class not found in JAR try loading as a NAR and searching for the class - if (narClassLoader != null) { - - try { - narClassLoader.loadClass(connectorClassName); - return narClassLoader; - } catch (ClassNotFoundException | NoClassDefFoundError e1) { - throw new IllegalArgumentException( - String.format("%s class %s must be in class path", - capFirstLetter(componentType), connectorClassName), e1); - } + if (componentType == org.apache.pulsar.functions.proto.Function.FunctionDetails.ComponentType.SOURCE) { + connectorClassName = ConnectorUtils.getIOSourceClass((NarClassLoader) narClassLoader); } else { - throw new IllegalArgumentException( - String.format("%s class %s must be in class path", capFirstLetter(componentType), - connectorClassName), e); + connectorClassName = ConnectorUtils.getIOSinkClass((NarClassLoader) narClassLoader); } + } catch (IOException e) { + throw new IllegalArgumentException(String.format("Failed to extract %s class from archive", + componentType.toString().toLowerCase()), e); } - } else if (narClassLoader != null) { + try { narClassLoader.loadClass(connectorClassName); + keepNarClassLoader = true; return narClassLoader; - } catch (ClassNotFoundException | NoClassDefFoundError e1) { + } catch (ClassNotFoundException | NoClassDefFoundError e) { throw new IllegalArgumentException( - String.format("%s class %s must be in class path", - capFirstLetter(componentType), connectorClassName), e1); + String.format("%s class %s must be in class path", capFirstLetter(componentType), + connectorClassName), e); } + } else { - StringBuilder errorMsg = new StringBuilder(capFirstLetter(componentType) - + " 
package does not have the correct format." - + " Pulsar cannot determine if the package is a NAR package or JAR package."); + // if connector class name is provided, we need to try to load it as a JAR and as a NAR. + if (jarClassLoader != null) { + try { + jarClassLoader.loadClass(connectorClassName); + keepJarClassLoader = true; + return jarClassLoader; + } catch (ClassNotFoundException | NoClassDefFoundError e) { + // class not found in JAR try loading as a NAR and searching for the class + if (narClassLoader != null) { + + try { + narClassLoader.loadClass(connectorClassName); + keepNarClassLoader = true; + return narClassLoader; + } catch (ClassNotFoundException | NoClassDefFoundError e1) { + throw new IllegalArgumentException( + String.format("%s class %s must be in class path", + capFirstLetter(componentType), connectorClassName), e1); + } + } else { + throw new IllegalArgumentException( + String.format("%s class %s must be in class path", capFirstLetter(componentType), + connectorClassName), e); + } + } + } else if (narClassLoader != null) { + try { + narClassLoader.loadClass(connectorClassName); + keepNarClassLoader = true; + return narClassLoader; + } catch (ClassNotFoundException | NoClassDefFoundError e1) { + throw new IllegalArgumentException( + String.format("%s class %s must be in class path", + capFirstLetter(componentType), connectorClassName), e1); + } + } else { + StringBuilder errorMsg = new StringBuilder(capFirstLetter(componentType) + + " package does not have the correct format." 
+ + " Pulsar cannot determine if the package is a NAR package or JAR package."); - if (jarClassLoaderException != null) { - errorMsg.append(" Attempts to load it as a JAR package produced error: " + jarClassLoaderException.getMessage()); - } + if (jarClassLoaderException != null) { + errorMsg.append(" Attempts to load it as a JAR package produced error: " + jarClassLoaderException.getMessage()); + } - if (narClassLoaderException != null) { - errorMsg.append(" Attempts to load it as a NAR package produced error: " + narClassLoaderException.getMessage()); - } + if (narClassLoaderException != null) { + errorMsg.append(" Attempts to load it as a NAR package produced error: " + narClassLoaderException.getMessage()); + } - throw new IllegalArgumentException(errorMsg.toString()); + throw new IllegalArgumentException(errorMsg.toString()); + } + } + } finally { + if (!keepJarClassLoader) { + ClassLoaderUtils.closeClassLoader(jarClassLoader); + } + if (!keepNarClassLoader) { + ClassLoaderUtils.closeClassLoader(narClassLoader); } } } diff --git a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionConfigUtils.java b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionConfigUtils.java index 539f066a6db10..9b8d8d4f4ed85 100644 --- a/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionConfigUtils.java +++ b/pulsar-functions/utils/src/main/java/org/apache/pulsar/functions/utils/FunctionConfigUtils.java @@ -56,7 +56,7 @@ @Slf4j public class FunctionConfigUtils { - static final Integer MAX_PENDING_ASYNC_REQUESTS_DEFAULT = Integer.valueOf(1000); + static final Integer MAX_PENDING_ASYNC_REQUESTS_DEFAULT = 1000; static final Boolean FORWARD_SOURCE_MESSAGE_PROPERTY_DEFAULT = Boolean.TRUE; private static final ObjectMapper OBJECT_MAPPER = ObjectMapperFactory.create(); diff --git a/pulsar-functions/worker/pom.xml b/pulsar-functions/worker/pom.xml index f5709031047d1..385f7fb6ed1ae 100644 --- 
a/pulsar-functions/worker/pom.xml +++ b/pulsar-functions/worker/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar-functions - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/ClusterServiceCoordinator.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/ClusterServiceCoordinator.java index c1682a8607225..570408e975f34 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/ClusterServiceCoordinator.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/ClusterServiceCoordinator.java @@ -19,6 +19,7 @@ package org.apache.pulsar.functions.worker; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -69,7 +70,7 @@ public void start() { for (Map.Entry entry : this.tasks.entrySet()) { TimerTaskInfo timerTaskInfo = entry.getValue(); String taskName = entry.getKey(); - this.executor.scheduleAtFixedRate(() -> { + this.executor.scheduleAtFixedRate(catchingAndLoggingThrowables(() -> { if (isLeader.get()) { try { timerTaskInfo.getTask().run(); @@ -77,7 +78,7 @@ public void start() { log.error("Cluster timer task {} failed with exception.", taskName, e); } } - }, timerTaskInfo.getInterval(), timerTaskInfo.getInterval(), TimeUnit.MILLISECONDS); + }), timerTaskInfo.getInterval(), timerTaskInfo.getInterval(), TimeUnit.MILLISECONDS); } } diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionActioner.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionActioner.java index 6497e1547896e..758157976f11f 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionActioner.java +++ 
b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionActioner.java @@ -18,6 +18,14 @@ */ package org.apache.pulsar.functions.worker; +import static org.apache.commons.lang3.StringUtils.isBlank; +import static org.apache.pulsar.common.functions.Utils.FILE; +import static org.apache.pulsar.common.functions.Utils.HTTP; +import static org.apache.pulsar.common.functions.Utils.hasPackageTypePrefix; +import static org.apache.pulsar.common.functions.Utils.isFunctionPackageUrlSupported; +import static org.apache.pulsar.functions.auth.FunctionAuthUtils.getFunctionAuthData; +import static org.apache.pulsar.functions.utils.FunctionCommon.getSinkType; +import static org.apache.pulsar.functions.utils.FunctionCommon.getSourceType; import com.fasterxml.jackson.core.JsonProcessingException; import com.google.common.io.MoreFiles; import com.google.common.io.RecursiveDeleteOption; @@ -65,14 +73,6 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.apache.commons.lang3.StringUtils.isBlank; -import static org.apache.pulsar.common.functions.Utils.FILE; -import static org.apache.pulsar.common.functions.Utils.HTTP; -import static org.apache.pulsar.common.functions.Utils.isFunctionPackageUrlSupported; -import static org.apache.pulsar.functions.auth.FunctionAuthUtils.getFunctionAuthData; -import static org.apache.pulsar.functions.utils.FunctionCommon.getSinkType; -import static org.apache.pulsar.functions.utils.FunctionCommon.getSourceType; - @Data @Slf4j public class FunctionActioner { @@ -192,7 +192,8 @@ InstanceConfig createInstanceConfig(FunctionDetails functionDetails, Function.Fu return instanceConfig; } - private void downloadFile(File pkgFile, boolean isPkgUrlProvided, FunctionMetaData functionMetaData, int instanceId) throws FileNotFoundException, IOException { + private void downloadFile(File pkgFile, boolean isPkgUrlProvided, FunctionMetaData functionMetaData, + int instanceId) throws FileNotFoundException, 
IOException, PulsarAdminException { FunctionDetails details = functionMetaData.getFunctionDetails(); File pkgDir = pkgFile.getParentFile(); @@ -204,22 +205,22 @@ private void downloadFile(File pkgFile, boolean isPkgUrlProvided, FunctionMetaDa } File tempPkgFile; - while (true) { + do { tempPkgFile = new File( pkgDir, pkgFile.getName() + "." + instanceId + "." + UUID.randomUUID().toString()); - if (!tempPkgFile.exists() && tempPkgFile.createNewFile()) { - break; - } - } + } while (tempPkgFile.exists() || !tempPkgFile.createNewFile()); String pkgLocationPath = functionMetaData.getPackageLocation().getPackagePath(); boolean downloadFromHttp = isPkgUrlProvided && pkgLocationPath.startsWith(HTTP); + boolean downloadFromPackageManagementService = isPkgUrlProvided && hasPackageTypePrefix(pkgLocationPath); log.info("{}/{}/{} Function package file {} will be downloaded from {}", tempPkgFile, details.getTenant(), details.getNamespace(), details.getName(), downloadFromHttp ? pkgLocationPath : functionMetaData.getPackageLocation()); if(downloadFromHttp) { FunctionCommon.downloadFromHttpUrl(pkgLocationPath, tempPkgFile); + } else if (downloadFromPackageManagementService) { + getPulsarAdmin().packages().download(pkgLocationPath, tempPkgFile.getPath()); } else { FileOutputStream tempPkgFos = new FileOutputStream(tempPkgFile); WorkerUtils.downloadFromBookkeeper( diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionMetaDataManager.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionMetaDataManager.java index 1ff1b2bdf17ef..3d8c07e927c20 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionMetaDataManager.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionMetaDataManager.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.functions.worker; -import com.google.common.annotations.VisibleForTesting; import java.io.IOException; 
import java.util.Collection; import java.util.LinkedList; @@ -62,7 +61,6 @@ public class FunctionMetaDataManager implements AutoCloseable { // Represents the global state // tenant -> namespace -> (function name, FunctionRuntimeInfo) - @VisibleForTesting final Map>> functionMetaDataMap = new ConcurrentHashMap<>(); private final SchedulerManager schedulerManager; @@ -240,7 +238,7 @@ public synchronized void updateFunctionOnLeader(FunctionMetaData functionMetaDat } lastMessageSeen = builder.send(); if (delete) { - needsScheduling = proccessDeregister(functionMetaData); + needsScheduling = processDeregister(functionMetaData); } else { needsScheduling = processUpdate(functionMetaData); } @@ -359,7 +357,7 @@ private void processUncompactedMetaDataTopicMessage(Message message) thr this.processUpdate(serviceRequest.getFunctionMetaData()); break; case DELETE: - this.proccessDeregister(serviceRequest.getFunctionMetaData()); + this.processDeregister(serviceRequest.getFunctionMetaData()); break; default: log.warn("Received request with unrecognized type: {}", serviceRequest); @@ -367,13 +365,13 @@ private void processUncompactedMetaDataTopicMessage(Message message) thr } private void processCompactedMetaDataTopicMessage(Message message) throws IOException { - long version = Long.valueOf(message.getProperty(versionTag)); + long version = Long.parseLong(message.getProperty(versionTag)); String tenant = FunctionCommon.extractTenantFromFullyQualifiedName(message.getKey()); String namespace = FunctionCommon.extractNamespaceFromFullyQualifiedName(message.getKey()); String functionName = FunctionCommon.extractNameFromFullyQualifiedName(message.getKey()); if (message.getData() == null || message.getData().length == 0) { // this is a delete message - this.proccessDeregister(tenant, namespace, functionName, version); + this.processDeregister(tenant, namespace, functionName, version); } else { FunctionMetaData functionMetaData = FunctionMetaData.parseFrom(message.getData()); 
this.processUpdate(functionMetaData); @@ -404,20 +402,20 @@ private boolean containsFunctionMetaData(String tenant, String namespace, String return false; } - @VisibleForTesting - synchronized boolean proccessDeregister(FunctionMetaData deregisterRequestFs) throws IllegalArgumentException { + synchronized boolean processDeregister(FunctionMetaData deregisterRequestFs) throws IllegalArgumentException { String functionName = deregisterRequestFs.getFunctionDetails().getName(); String tenant = deregisterRequestFs.getFunctionDetails().getTenant(); String namespace = deregisterRequestFs.getFunctionDetails().getNamespace(); - return proccessDeregister(tenant, namespace, functionName, deregisterRequestFs.getVersion()); + return processDeregister(tenant, namespace, functionName, deregisterRequestFs.getVersion()); } - synchronized boolean proccessDeregister(String tenant, String namespace, - String functionName, long version) throws IllegalArgumentException { + synchronized boolean processDeregister(String tenant, String namespace, + String functionName, long version) throws IllegalArgumentException { boolean needsScheduling = false; - - log.debug("Process deregister request: {}/{}/{}/{}", tenant, namespace, functionName, version); + if (log.isDebugEnabled()) { + log.debug("Process deregister request: {}/{}/{}/{}", tenant, namespace, functionName, version); + } // Check if we still have this function. 
Maybe already deleted by someone else if (this.containsFunctionMetaData(tenant, namespace, functionName)) { @@ -437,7 +435,6 @@ synchronized boolean proccessDeregister(String tenant, String namespace, return needsScheduling; } - @VisibleForTesting synchronized boolean processUpdate(FunctionMetaData updateRequestFs) throws IllegalArgumentException { log.debug("Process update request: {}", updateRequestFs); @@ -481,7 +478,6 @@ private boolean isRequestOutdated(String tenant, String namespace, String functi return currentFunctionMetaData.getVersion() >= version; } - @VisibleForTesting void setFunctionMetaData(FunctionMetaData functionMetaData) { Function.FunctionDetails functionDetails = functionMetaData.getFunctionDetails(); if (!this.functionMetaDataMap.containsKey(functionDetails.getTenant())) { diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionRuntimeManager.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionRuntimeManager.java index eaad1320d7ce8..d37049fe4029e 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionRuntimeManager.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionRuntimeManager.java @@ -79,12 +79,10 @@ public class FunctionRuntimeManager implements AutoCloseable{ // all assignments // WorkerId -> Function Fully Qualified InstanceId -> List - @VisibleForTesting Map> workerIdToAssignments = new ConcurrentHashMap<>(); // All the runtime info related to functions executed by this worker // Fully Qualified InstanceId - > FunctionRuntimeInfo - @VisibleForTesting class FunctionRuntimeInfos { private Map functionRuntimeInfoMap = new ConcurrentHashMap<>(); @@ -114,10 +112,8 @@ public int size() { } } - @VisibleForTesting final FunctionRuntimeInfos functionRuntimeInfos = new FunctionRuntimeInfos(); - @VisibleForTesting @Getter final WorkerConfig workerConfig; @@ -266,10 +262,6 @@ public MessageId 
initialize() { } } - /** - * Starts the function runtime manager - */ - /** * Get current assignments * @return a map of current assignments in the following format @@ -827,7 +819,6 @@ public synchronized void deleteAssignment(String fullyQualifiedInstanceId) { } } - @VisibleForTesting void deleteAssignment(Assignment assignment) { String fullyQualifiedInstanceId = FunctionCommon.getFullyQualifiedInstanceId(assignment.getInstance()); Map assignmentMap = this.workerIdToAssignments.get(assignment.getWorkerId()); diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionsStatsGenerator.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionsStatsGenerator.java index 7dd1e74b8e5bc..d94f54c253e0e 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionsStatsGenerator.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/FunctionsStatsGenerator.java @@ -68,7 +68,10 @@ public static void generate(PulsarWorkerService workerService, SimpleTextOutputS if (functionRuntime != null) { try { - out.write(functionRuntime.getPrometheusMetrics()); + String prometheusMetrics = functionRuntime.getPrometheusMetrics(); + if (prometheusMetrics != null) { + out.write(prometheusMetrics); + } } catch (IOException e) { log.warn("Failed to collect metrics for function instance {}", diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/LeaderService.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/LeaderService.java index fb11fab785b8c..645cdcb2bae96 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/LeaderService.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/LeaderService.java @@ -103,7 +103,8 @@ public void becameActive(Consumer consumer, int partitionId) { // attempt to acquire exclusive publishers to both the metadata 
topic and assignments topic // we should keep trying to acquire exclusive producers as long as we are still the leader - Supplier checkIsStillLeader = () -> membershipManager.getLeader().getWorkerId().equals(workerConfig.getWorkerId()); + Supplier checkIsStillLeader = WorkerUtils.getIsStillLeaderSupplier(membershipManager, + workerConfig.getWorkerId()); Producer scheduleManagerExclusiveProducer = null; Producer functionMetaDataManagerExclusiveProducer = null; try { diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PulsarWorkerService.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PulsarWorkerService.java index 9c47dfe8b2bb3..9ac0374675620 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PulsarWorkerService.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/PulsarWorkerService.java @@ -19,9 +19,6 @@ package org.apache.pulsar.functions.worker; import static org.apache.pulsar.common.policies.data.PoliciesUtil.getBundles; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Sets; import io.netty.util.concurrent.DefaultThreadFactory; @@ -45,7 +42,6 @@ import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationService; import org.apache.pulsar.broker.authorization.AuthorizationService; -import org.apache.pulsar.broker.cache.ConfigurationCacheService; import org.apache.pulsar.broker.resources.PulsarResources; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.client.admin.PulsarAdminException; @@ -55,12 +51,10 @@ import org.apache.pulsar.common.conf.InternalConfigurationData; import org.apache.pulsar.common.naming.NamedEntity; import org.apache.pulsar.common.naming.NamespaceName; -import 
org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.policies.data.ClusterDataImpl; import org.apache.pulsar.common.policies.data.Policies; import org.apache.pulsar.common.policies.data.RetentionPolicies; import org.apache.pulsar.common.policies.data.TenantInfoImpl; -import org.apache.pulsar.common.policies.path.PolicyPath; import org.apache.pulsar.common.util.SimpleTextOutputStream; import org.apache.pulsar.functions.worker.rest.api.FunctionsImpl; import org.apache.pulsar.functions.worker.rest.api.FunctionsImplV2; @@ -139,7 +133,8 @@ public PulsarAdmin newPulsarAdmin(String pulsarServiceUrl, WorkerConfig workerCo workerConfig.getBrokerClientAuthenticationParameters(), workerConfig.getBrokerClientTrustCertsFilePath(), workerConfig.isTlsAllowInsecureConnection(), - workerConfig.isTlsEnableHostnameVerification()); + workerConfig.isTlsEnableHostnameVerification(), + workerConfig); } else { return WorkerUtils.getPulsarAdminClient( pulsarServiceUrl, @@ -147,7 +142,8 @@ public PulsarAdmin newPulsarAdmin(String pulsarServiceUrl, WorkerConfig workerCo null, null, workerConfig.isTlsAllowInsecureConnection(), - workerConfig.isTlsEnableHostnameVerification()); + workerConfig.isTlsEnableHostnameVerification(), + workerConfig); } } @@ -162,7 +158,8 @@ public PulsarClient newPulsarClient(String pulsarServiceUrl, WorkerConfig worker workerConfig.isUseTls(), workerConfig.getBrokerClientTrustCertsFilePath(), workerConfig.isTlsAllowInsecureConnection(), - workerConfig.isTlsEnableHostnameVerification()); + workerConfig.isTlsEnableHostnameVerification(), + workerConfig); } else { return WorkerUtils.getPulsarClient( pulsarServiceUrl, @@ -171,7 +168,8 @@ public PulsarClient newPulsarClient(String pulsarServiceUrl, WorkerConfig worker null, null, workerConfig.isTlsAllowInsecureConnection(), - workerConfig.isTlsEnableHostnameVerification()); + workerConfig.isTlsEnableHostnameVerification(), + workerConfig); } } }; @@ -188,7 +186,6 @@ public void 
generateFunctionsStats(SimpleTextOutputStream out) { ); } - @VisibleForTesting public void init(WorkerConfig workerConfig, URI dlogUri, boolean runAsStandalone) { @@ -403,12 +400,7 @@ public void start(AuthenticationService authenticationService, workerStatsManager.startupTimeStart(); log.info("/** Starting worker id={} **/", workerConfig.getWorkerId()); - - try { - log.info("Worker Configs: {}", new ObjectMapper().writeValueAsString(workerConfig)); - } catch (JsonProcessingException e) { - log.warn("Failed to print worker configs with error {}", e.getMessage(), e); - } + log.info("Worker Configs: {}", workerConfig); try { DistributedLogConfiguration dlogConf = WorkerUtils.getDlogConf(workerConfig); @@ -507,7 +499,9 @@ public void start(AuthenticationService authenticationService, log.info("/** Initializing Runtime Manager **/"); MessageId lastAssignmentMessageId = functionRuntimeManager.initialize(); - Supplier checkIsStillLeader = () -> membershipManager.getLeader().getWorkerId().equals(workerConfig.getWorkerId()); + + Supplier checkIsStillLeader = WorkerUtils.getIsStillLeaderSupplier(membershipManager, + workerConfig.getWorkerId()); // Setting references to managers in scheduler schedulerManager.setFunctionMetaDataManager(functionMetaDataManager); diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/SchedulerManager.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/SchedulerManager.java index b7fd0a4690740..2fed4991b93b8 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/SchedulerManager.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/SchedulerManager.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.functions.worker; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import com.fasterxml.jackson.core.JsonProcessingException; import com.google.common.annotations.VisibleForTesting; import 
com.google.common.base.Preconditions; @@ -370,7 +371,6 @@ private synchronized Set getCurrentAvailableWorkers() { return currentMembership; } - @VisibleForTesting void invokeScheduler() { long startTime = System.nanoTime(); @@ -532,19 +532,19 @@ private void invokeRebalance() { private void scheduleCompaction(ScheduledExecutorService executor, long scheduleFrequencySec) { if (executor != null) { - executor.scheduleWithFixedDelay(() -> { + executor.scheduleWithFixedDelay(catchingAndLoggingThrowables(() -> { if (leaderService.isLeader() && isCompactionNeeded.get()) { compactAssignmentTopic(); isCompactionNeeded.set(false); } - }, scheduleFrequencySec, scheduleFrequencySec, TimeUnit.SECONDS); + }), scheduleFrequencySec, scheduleFrequencySec, TimeUnit.SECONDS); - executor.scheduleWithFixedDelay(() -> { + executor.scheduleWithFixedDelay(catchingAndLoggingThrowables(() -> { if (leaderService.isLeader() && metadataTopicLastMessage.compareTo(functionMetaDataManager.getLastMessageSeen()) != 0) { metadataTopicLastMessage = functionMetaDataManager.getLastMessageSeen(); compactFunctionMetadataTopic(); } - }, scheduleFrequencySec, scheduleFrequencySec, TimeUnit.SECONDS); + }), scheduleFrequencySec, scheduleFrequencySec, TimeUnit.SECONDS); } } @@ -560,7 +560,6 @@ void clearAssignmentsMovedInLastDrain() { assignmentsMovedInLastDrain = null; } - @VisibleForTesting List invokeDrain(String workerId) { long startTime = System.nanoTime(); diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/WorkerService.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/WorkerService.java index f76f7536dfb01..f5293ceded295 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/WorkerService.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/WorkerService.java @@ -21,7 +21,6 @@ import org.apache.pulsar.broker.ServiceConfiguration; import 
org.apache.pulsar.broker.authentication.AuthenticationService; import org.apache.pulsar.broker.authorization.AuthorizationService; -import org.apache.pulsar.broker.cache.ConfigurationCacheService; import org.apache.pulsar.broker.resources.PulsarResources; import org.apache.pulsar.common.conf.InternalConfigurationData; import org.apache.pulsar.common.util.SimpleTextOutputStream; diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/WorkerStatsManager.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/WorkerStatsManager.java index c8b411cbf5706..0076a2f20e914 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/WorkerStatsManager.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/WorkerStatsManager.java @@ -328,6 +328,12 @@ private void generateLeaderMetrics(StringWriter stream) { } private void writeMetric(String metricName, long value, StringWriter stream) { + stream.write("# TYPE "); + stream.write(PULSAR_FUNCTION_WORKER_METRICS_PREFIX); + stream.write(metricName); + stream.write(" gauge"); + stream.write("\n"); + stream.write(PULSAR_FUNCTION_WORKER_METRICS_PREFIX); stream.write(metricName); stream.write("{"); diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/WorkerUtils.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/WorkerUtils.java index d7c6a71f8c36f..741a89bc397bd 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/WorkerUtils.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/WorkerUtils.java @@ -40,9 +40,11 @@ import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Reader; import org.apache.pulsar.client.api.ReaderBuilder; +import org.apache.pulsar.client.internal.PropertiesUtils; import org.apache.pulsar.common.conf.InternalConfigurationData; -import 
org.apache.pulsar.common.policies.data.FunctionInstanceStatsImpl; +import org.apache.pulsar.common.functions.WorkerInfo; import org.apache.pulsar.common.policies.data.FunctionInstanceStatsDataImpl; +import org.apache.pulsar.common.policies.data.FunctionInstanceStatsImpl; import org.apache.pulsar.functions.proto.InstanceCommunication; import org.apache.pulsar.functions.runtime.Runtime; import org.apache.pulsar.functions.runtime.RuntimeSpawner; @@ -157,6 +159,13 @@ public static DistributedLogConfiguration getDlogConf(WorkerConfig workerConfig) workerConfig.getBookkeeperClientAuthenticationParameters()); } } + // Map arbitrary bookkeeper client configuration into DLog Config. Note that this only configures the + // bookie client. + PropertiesUtils.filterAndMapProperties(workerConfig.getProperties(), "bookkeeper_", "bkc.") + .forEach((key, value) -> { + log.info("Applying DLog BookKeeper client configuration setting {}={}", key, value); + conf.setProperty(key, value); + }); return conf; } @@ -193,12 +202,20 @@ public static URI initializeDlogNamespace(InternalConfigurationData internalConf } public static PulsarAdmin getPulsarAdminClient(String pulsarWebServiceUrl) { - return getPulsarAdminClient(pulsarWebServiceUrl, null, null, null, null, null); + return getPulsarAdminClient(pulsarWebServiceUrl, null, null, null, null, null, null); } public static PulsarAdmin getPulsarAdminClient(String pulsarWebServiceUrl, String authPlugin, String authParams, String tlsTrustCertsFilePath, Boolean allowTlsInsecureConnection, Boolean enableTlsHostnameVerificationEnable) { + return getPulsarAdminClient(pulsarWebServiceUrl, authPlugin, authParams, tlsTrustCertsFilePath, + allowTlsInsecureConnection, enableTlsHostnameVerificationEnable, null); + } + + public static PulsarAdmin getPulsarAdminClient(String pulsarWebServiceUrl, String authPlugin, String authParams, + String tlsTrustCertsFilePath, Boolean allowTlsInsecureConnection, + Boolean enableTlsHostnameVerificationEnable, + 
WorkerConfig workerConfig) { log.info("Create Pulsar Admin to service url {}: " + "authPlugin = {}, authParams = {}, " + "tlsTrustCerts = {}, allowTlsInsecureConnector = {}, enableTlsHostnameVerification = {}", @@ -206,6 +223,13 @@ public static PulsarAdmin getPulsarAdminClient(String pulsarWebServiceUrl, Strin tlsTrustCertsFilePath, allowTlsInsecureConnection, enableTlsHostnameVerificationEnable); try { PulsarAdminBuilder adminBuilder = PulsarAdmin.builder().serviceHttpUrl(pulsarWebServiceUrl); + if (workerConfig != null) { + // Apply all arbitrary configuration. This must be called before setting any fields annotated as + // @Secret on the ClientConfigurationData object because of the way they are serialized. + // See https://github.com/apache/pulsar/issues/8509 for more information. + adminBuilder.loadConf( + PropertiesUtils.filterAndMapProperties(workerConfig.getProperties(), "brokerClient_")); + } if (isNotBlank(authPlugin) && isNotBlank(authParams)) { adminBuilder.authentication(authPlugin, authParams); } @@ -218,6 +242,7 @@ public static PulsarAdmin getPulsarAdminClient(String pulsarWebServiceUrl, Strin if (enableTlsHostnameVerificationEnable != null) { adminBuilder.enableTlsHostnameVerification(enableTlsHostnameVerificationEnable); } + return adminBuilder.build(); } catch (PulsarClientException e) { log.error("Error creating pulsar admin client", e); @@ -227,17 +252,33 @@ public static PulsarAdmin getPulsarAdminClient(String pulsarWebServiceUrl, Strin public static PulsarClient getPulsarClient(String pulsarServiceUrl) { return getPulsarClient(pulsarServiceUrl, null, null, null, - null, null, null); + null, null, null, null); } public static PulsarClient getPulsarClient(String pulsarServiceUrl, String authPlugin, String authParams, Boolean useTls, String tlsTrustCertsFilePath, Boolean allowTlsInsecureConnection, Boolean enableTlsHostnameVerificationEnable) { + return getPulsarClient(pulsarServiceUrl, authPlugin, authParams, useTls, tlsTrustCertsFilePath, + 
allowTlsInsecureConnection, enableTlsHostnameVerificationEnable, null); + } + + public static PulsarClient getPulsarClient(String pulsarServiceUrl, String authPlugin, String authParams, + Boolean useTls, String tlsTrustCertsFilePath, + Boolean allowTlsInsecureConnection, + Boolean enableTlsHostnameVerificationEnable, + WorkerConfig workerConfig) { try { ClientBuilder clientBuilder = PulsarClient.builder().serviceUrl(pulsarServiceUrl); + if (workerConfig != null) { + // Apply all arbitrary configuration. This must be called before setting any fields annotated as + // @Secret on the ClientConfigurationData object because of the way they are serialized. + // See https://github.com/apache/pulsar/issues/8509 for more information. + clientBuilder.loadConf( + PropertiesUtils.filterAndMapProperties(workerConfig.getProperties(), "brokerClient_")); + } if (isNotBlank(authPlugin) && isNotBlank(authParams)) { clientBuilder.authentication(authPlugin, authParams); @@ -254,7 +295,6 @@ && isNotBlank(authParams)) { if (enableTlsHostnameVerificationEnable != null) { clientBuilder.enableTlsHostnameVerification(enableTlsHostnameVerificationEnable); } - return clientBuilder.build(); } catch (PulsarClientException e) { log.error("Error creating pulsar client", e); @@ -352,7 +392,9 @@ public static Producer createExclusiveProducerWithRetry(PulsarClient cli } tries++; if (tries % 6 == 0) { - log.warn("Failed to acquire exclusive producer to topic {} after {} attempts. Will retry if we are still the leader.", topic, tries); + if (log.isDebugEnabled()) { + log.debug("Failed to acquire exclusive producer to topic {} after {} attempts. 
Will retry if we are still the leader.", topic, tries); + } } Thread.sleep(sleepInBetweenMs); } while (isLeader.get()); @@ -366,4 +408,12 @@ public static Producer createExclusiveProducerWithRetry(PulsarClient cli public static class NotLeaderAnymore extends Exception { } + + public static Supplier getIsStillLeaderSupplier(final MembershipManager membershipManager, + final String workerId) { + return () -> { + WorkerInfo workerInfo = membershipManager.getLeader(); + return workerInfo != null && workerInfo.getWorkerId().equals(workerId); + }; + } } diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/dlog/DLInputStream.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/dlog/DLInputStream.java index 10acb54350020..27af304741280 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/dlog/DLInputStream.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/dlog/DLInputStream.java @@ -40,10 +40,8 @@ public class DLInputStream extends InputStream { // Cache the input stream for a log record. private static class LogRecordWithInputStream { private final InputStream payloadStream; - private final LogRecordWithDLSN logRecord; LogRecordWithInputStream(LogRecordWithDLSN logRecord) { - this.logRecord = logRecord; this.payloadStream = logRecord.getPayLoadInputStream(); } @@ -51,15 +49,6 @@ InputStream getPayLoadInputStream() { return payloadStream; } - LogRecordWithDLSN getLogRecord() { - return logRecord; - } - - // The last txid of the log record is the position of the next byte in the stream. - // Subtract length to get starting offset. 
- long getOffset() { - return logRecord.getTransactionId() - logRecord.getPayload().length; - } } /** diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/WorkerServer.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/WorkerServer.java index c7414c23734d7..831a474a51da6 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/WorkerServer.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/WorkerServer.java @@ -18,7 +18,6 @@ */ package org.apache.pulsar.functions.worker.rest; -import com.google.common.annotations.VisibleForTesting; import io.prometheus.client.jetty.JettyStatisticsCollector; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.broker.authentication.AuthenticationService; @@ -26,11 +25,11 @@ import org.apache.pulsar.broker.web.RateLimitingFilter; import org.apache.pulsar.broker.web.JettyRequestLogFactory; import org.apache.pulsar.broker.web.WebExecutorThreadPool; -import org.apache.pulsar.common.util.SecurityUtility; import org.apache.pulsar.functions.worker.WorkerConfig; import org.apache.pulsar.functions.worker.WorkerService; import org.apache.pulsar.functions.worker.rest.api.v2.WorkerApiV2Resource; import org.apache.pulsar.functions.worker.rest.api.v2.WorkerStatsApiV2Resource; +import org.apache.pulsar.jetty.tls.JettySslContextFactory; import org.eclipse.jetty.server.Handler; import org.eclipse.jetty.server.Server; import org.eclipse.jetty.server.ServerConnector; @@ -46,8 +45,6 @@ import org.glassfish.jersey.server.ResourceConfig; import org.glassfish.jersey.servlet.ServletContainer; -import java.net.BindException; -import java.net.URI; import java.util.ArrayList; import java.util.EnumSet; import java.util.List; @@ -69,15 +66,6 @@ public class WorkerServer { private ServerConnector httpConnector; private ServerConnector httpsConnector; - private static String getErrorMessage(Server server, int port, 
Exception ex) { - if (ex instanceof BindException) { - final URI uri = server.getURI(); - return String.format("%s http://%s:%d", ex.getMessage(), uri.getHost(), port); - } - - return ex.getMessage(); - } - public WorkerServer(WorkerService workerService, AuthenticationService authenticationService) { this.workerConfig = workerService.getWorkerConfig(); this.workerService = workerService; @@ -134,13 +122,36 @@ private void init() { if (this.workerConfig.getTlsEnabled()) { try { - SslContextFactory sslCtxFactory = SecurityUtility.createSslContextFactory( - this.workerConfig.isTlsAllowInsecureConnection(), this.workerConfig.getTlsTrustCertsFilePath(), - this.workerConfig.getTlsCertificateFilePath(), this.workerConfig.getTlsKeyFilePath(), - this.workerConfig.isTlsRequireTrustedClientCertOnConnect(), - true, - this.workerConfig.getTlsCertRefreshCheckDurationSec()); - httpsConnector = new ServerConnector(server, 1, 1, sslCtxFactory); + SslContextFactory sslCtxFactory; + if (workerConfig.isTlsEnabledWithKeyStore()) { + sslCtxFactory = JettySslContextFactory.createServerSslContextWithKeystore( + workerConfig.getTlsProvider(), + workerConfig.getTlsKeyStoreType(), + workerConfig.getTlsKeyStore(), + workerConfig.getTlsKeyStorePassword(), + workerConfig.isTlsAllowInsecureConnection(), + workerConfig.getTlsTrustStoreType(), + workerConfig.getTlsTrustStore(), + workerConfig.getTlsTrustStorePassword(), + workerConfig.isTlsRequireTrustedClientCertOnConnect(), + workerConfig.getWebServiceTlsCiphers(), + workerConfig.getWebServiceTlsProtocols(), + workerConfig.getTlsCertRefreshCheckDurationSec() + ); + } else { + sslCtxFactory = JettySslContextFactory.createServerSslContext( + workerConfig.getTlsProvider(), + workerConfig.isTlsAllowInsecureConnection(), + workerConfig.getTlsTrustCertsFilePath(), + workerConfig.getTlsCertificateFilePath(), + workerConfig.getTlsKeyFilePath(), + workerConfig.isTlsRequireTrustedClientCertOnConnect(), + workerConfig.getWebServiceTlsCiphers(), + 
workerConfig.getWebServiceTlsProtocols(), + workerConfig.getTlsCertRefreshCheckDurationSec() + ); + } + httpsConnector = new ServerConnector(server, sslCtxFactory); httpsConnector.setPort(this.workerConfig.getWorkerPortTls()); connectors.add(httpsConnector); } catch (Exception e) { @@ -191,7 +202,6 @@ public static ServletContextHandler newServletContextHandler(String contextPath, return contextHandler; } - @VisibleForTesting public void stop() { if (this.server != null) { try { diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java index c1bc9960b9ab8..bad35e5443e44 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/ComponentImpl.java @@ -28,7 +28,7 @@ import static org.apache.pulsar.functions.utils.FunctionCommon.getUniquePackageName; import static org.apache.pulsar.functions.utils.FunctionCommon.isFunctionCodeBuiltin; import static org.apache.pulsar.functions.worker.rest.RestUtils.throwUnavailableException; - +import com.google.common.base.Utf8; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; @@ -267,10 +267,7 @@ boolean isWorkerServiceAvailable() { if (workerService == null) { return false; } - if (!workerService.isInitialized()) { - return false; - } - return true; + return workerService.isInitialized(); } PackageLocationMetaData.Builder getFunctionPackageLocation(final FunctionMetaData functionMetaData, @@ -344,10 +341,9 @@ private void deleteStatestoreTableAsync(String namespace, String table) { StorageAdminClient adminClient = worker().getStateStoreAdminClient(); if (adminClient != null) { adminClient.deleteStream(namespace, table).whenComplete((res, throwable) -> { - if ((throwable == null && 
res.booleanValue()) - || (throwable != null && - (throwable instanceof NamespaceNotFoundException - || throwable instanceof StreamNotFoundException) )) { + if ((throwable == null && res) + || ((throwable instanceof NamespaceNotFoundException + || throwable instanceof StreamNotFoundException))) { log.info("{}/{} table deleted successfully", namespace, table); } else { if (throwable != null) { @@ -1102,10 +1098,13 @@ public FunctionState getFunctionState(final String tenant, if (kv.isNumber()) { value = new FunctionState(key, null, null, kv.numberValue(), kv.version()); } else { - try { - value = new FunctionState(key, new String(ByteBufUtil.getBytes(kv.value(), kv.value().readerIndex(), kv.value().readableBytes()), UTF_8), null, null, kv.version()); - } catch (Exception e) { - value = new FunctionState(key, null, ByteBufUtil.getBytes(kv.value()), null, kv.version()); + byte[] bytes = ByteBufUtil.getBytes(kv.value()); + if (Utf8.isWellFormed(bytes)) { + value = new FunctionState(key, new String(bytes, UTF_8), + null, null, kv.version()); + } else { + value = new FunctionState( + key, null, bytes, null, kv.version()); } } } @@ -1151,7 +1150,7 @@ public void putFunctionState(final String tenant, log.error("{}/{}/{} Failed to authorize [{}]", tenant, namespace, functionName, e); throw new RestException(Status.INTERNAL_SERVER_ERROR, e.getMessage()); } - + if (!key.equals(state.getKey())) { log.error("{}/{}/{} Bad putFunction Request, path key doesn't match key in json", tenant, namespace, functionName); throw new RestException(Status.BAD_REQUEST, "Path key doesn't match key in json"); diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImpl.java index 8c3585100cb95..506ca452857a5 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImpl.java +++ 
b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImpl.java @@ -32,7 +32,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.Arrays; import java.util.Collection; import java.util.LinkedList; import java.util.List; @@ -64,7 +63,6 @@ import org.apache.pulsar.functions.worker.PulsarWorkerService; import org.apache.pulsar.functions.worker.WorkerUtils; import org.apache.pulsar.functions.worker.service.api.Functions; -import org.apache.pulsar.packages.management.core.common.PackageType; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; @Slf4j @@ -704,7 +702,7 @@ public void updateFunctionOnWorkerLeader(final String tenant, // Redirect if we are not the leader if (!worker().getLeaderService().isLeader()) { WorkerInfo workerInfo = worker().getMembershipManager().getLeader(); - if (workerInfo.getWorkerId().equals(worker().getWorkerConfig().getWorkerId())) { + if (workerInfo == null || workerInfo.getWorkerId().equals(worker().getWorkerConfig().getWorkerId())) { throw new RestException(Response.Status.SERVICE_UNAVAILABLE, "Leader not yet ready. 
Please retry again"); } @@ -771,6 +769,7 @@ static File downloadPackageFile(PulsarWorkerService worker, String packageName) // use the Nar extraction directory as a temporary directory for downloaded files tempDirectory = Paths.get(worker.getWorkerConfig().getNarExtractionDirectory()); } + Files.createDirectories(tempDirectory); File file = Files.createTempFile(tempDirectory, "function", ".tmp").toFile(); worker.getBrokerAdmin().packages().download(packageName, file.toString()); return file; diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SinksImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SinksImpl.java index 31a72347de04c..ab69ab9182cf9 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SinksImpl.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SinksImpl.java @@ -30,7 +30,6 @@ import java.io.InputStream; import java.net.URI; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.LinkedList; import java.util.List; @@ -50,6 +49,7 @@ import org.apache.pulsar.common.io.SinkConfig; import org.apache.pulsar.common.policies.data.ExceptionInformation; import org.apache.pulsar.common.policies.data.SinkStatus; +import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.RestException; import org.apache.pulsar.functions.auth.FunctionAuthData; import org.apache.pulsar.functions.instance.InstanceUtils; @@ -63,7 +63,6 @@ import org.apache.pulsar.functions.worker.PulsarWorkerService; import org.apache.pulsar.functions.worker.WorkerUtils; import org.apache.pulsar.functions.worker.service.api.Sinks; -import org.apache.pulsar.packages.management.core.common.PackageType; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; @Slf4j @@ -734,19 +733,28 @@ private Function.FunctionDetails 
validateUpdateRequestParams(final String tenant } } - // if sink is not builtin, attempt to extract classloader from package file if it exists - if (classLoader == null && sinkPackageFile != null) { - classLoader = getClassLoaderFromPackage(sinkConfig.getClassName(), - sinkPackageFile, worker().getWorkerConfig().getNarExtractionDirectory()); - } + boolean shouldCloseClassLoader = false; + try { - if (classLoader == null) { - throw new IllegalArgumentException("Sink package is not provided"); - } + // if sink is not builtin, attempt to extract classloader from package file if it exists + if (classLoader == null && sinkPackageFile != null) { + classLoader = getClassLoaderFromPackage(sinkConfig.getClassName(), + sinkPackageFile, worker().getWorkerConfig().getNarExtractionDirectory()); + shouldCloseClassLoader = true; + } - SinkConfigUtils.ExtractedSinkDetails sinkDetails = SinkConfigUtils.validateAndExtractDetails( - sinkConfig, classLoader, worker().getWorkerConfig().getValidateConnectorConfig()); - return SinkConfigUtils.convert(sinkConfig, sinkDetails); + if (classLoader == null) { + throw new IllegalArgumentException("Sink package is not provided"); + } + + SinkConfigUtils.ExtractedSinkDetails sinkDetails = SinkConfigUtils.validateAndExtractDetails( + sinkConfig, classLoader, worker().getWorkerConfig().getValidateConnectorConfig()); + return SinkConfigUtils.convert(sinkConfig, sinkDetails); + } finally { + if (shouldCloseClassLoader) { + ClassLoaderUtils.closeClassLoader(classLoader); + } + } } private File downloadPackageFile(String packageName) throws IOException, PulsarAdminException { diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SourcesImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SourcesImpl.java index 1e9148bd658ae..df2dca813e770 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SourcesImpl.java +++ 
b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/SourcesImpl.java @@ -30,7 +30,6 @@ import java.io.InputStream; import java.net.URI; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.LinkedList; import java.util.List; @@ -50,6 +49,7 @@ import org.apache.pulsar.common.io.SourceConfig; import org.apache.pulsar.common.policies.data.ExceptionInformation; import org.apache.pulsar.common.policies.data.SourceStatus; +import org.apache.pulsar.common.util.ClassLoaderUtils; import org.apache.pulsar.common.util.RestException; import org.apache.pulsar.functions.auth.FunctionAuthData; import org.apache.pulsar.functions.instance.InstanceUtils; @@ -63,7 +63,6 @@ import org.apache.pulsar.functions.worker.PulsarWorkerService; import org.apache.pulsar.functions.worker.WorkerUtils; import org.apache.pulsar.functions.worker.service.api.Sources; -import org.apache.pulsar.packages.management.core.common.PackageType; import org.glassfish.jersey.media.multipart.FormDataContentDisposition; @Slf4j @@ -730,20 +729,28 @@ private Function.FunctionDetails validateUpdateRequestParams(final String tenant } } - // if source is not builtin, attempt to extract classloader from package file if it exists - if (classLoader == null && sourcePackageFile != null) { - classLoader = getClassLoaderFromPackage(sourceConfig.getClassName(), - sourcePackageFile, worker().getWorkerConfig().getNarExtractionDirectory()); - } + boolean shouldCloseClassLoader = false; + try { + // if source is not builtin, attempt to extract classloader from package file if it exists + if (classLoader == null && sourcePackageFile != null) { + classLoader = getClassLoaderFromPackage(sourceConfig.getClassName(), + sourcePackageFile, worker().getWorkerConfig().getNarExtractionDirectory()); + shouldCloseClassLoader = true; + } - if (classLoader == null) { - throw new IllegalArgumentException("Source package is not provided"); - } + if (classLoader == 
null) { + throw new IllegalArgumentException("Source package is not provided"); + } - SourceConfigUtils.ExtractedSourceDetails sourceDetails - = SourceConfigUtils.validateAndExtractDetails( - sourceConfig, classLoader, worker().getWorkerConfig().getValidateConnectorConfig()); - return SourceConfigUtils.convert(sourceConfig, sourceDetails); + SourceConfigUtils.ExtractedSourceDetails sourceDetails + = SourceConfigUtils.validateAndExtractDetails( + sourceConfig, classLoader, worker().getWorkerConfig().getValidateConnectorConfig()); + return SourceConfigUtils.convert(sourceConfig, sourceDetails); + } finally { + if (shouldCloseClassLoader) { + ClassLoaderUtils.closeClassLoader(classLoader); + } + } } private File downloadPackageFile(String packageName) throws IOException, PulsarAdminException { diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/WorkerImpl.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/WorkerImpl.java index f32f6d1c7bb7d..8255061d2b420 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/WorkerImpl.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/rest/api/WorkerImpl.java @@ -19,7 +19,6 @@ package org.apache.pulsar.functions.worker.rest.api; import lombok.extern.slf4j.Slf4j; -import lombok.val; import org.apache.pulsar.client.admin.LongRunningProcessStatus; import org.apache.pulsar.common.functions.WorkerInfo; import org.apache.pulsar.common.io.ConnectorDefinition; @@ -76,10 +75,7 @@ private boolean isWorkerServiceAvailable() { if (workerService == null) { return false; } - if (!workerService.isInitialized()) { - return false; - } - return true; + return workerService.isInitialized(); } @Override @@ -234,7 +230,11 @@ public void rebalance(final URI uri, final String clientRole) { } } else { WorkerInfo workerInfo = worker().getMembershipManager().getLeader(); - URI redirect = 
UriBuilder.fromUri(uri).host(workerInfo.getWorkerHostname()).port(workerInfo.getPort()).build(); + if (workerInfo == null) { + throw new RestException(Status.INTERNAL_SERVER_ERROR, "Leader cannot be determined"); + } + URI redirect = + UriBuilder.fromUri(uri).host(workerInfo.getWorkerHostname()).port(workerInfo.getPort()).build(); throw new WebApplicationException(Response.temporaryRedirect(redirect).build()); } } @@ -341,6 +341,9 @@ private URI buildRedirectUriForDrainRelatedOp(final URI uri, String workerId) { // Use the leader-URI path in both cases for the redirect to the leader. String leaderPath = "admin/v2/worker/leader/drain"; WorkerInfo workerInfo = worker().getMembershipManager().getLeader(); + if (workerInfo == null) { + throw new RestException(Status.INTERNAL_SERVER_ERROR, "Leader cannot be determined"); + } URI redirect = UriBuilder.fromUri(uri) .host(workerInfo.getWorkerHostname()) .port(workerInfo.getPort()) diff --git a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/service/WorkerServiceWithClassLoader.java b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/service/WorkerServiceWithClassLoader.java index 9439d72540c8a..6959616ce3d7d 100644 --- a/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/service/WorkerServiceWithClassLoader.java +++ b/pulsar-functions/worker/src/main/java/org/apache/pulsar/functions/worker/service/WorkerServiceWithClassLoader.java @@ -25,7 +25,6 @@ import org.apache.pulsar.broker.ServiceConfiguration; import org.apache.pulsar.broker.authentication.AuthenticationService; import org.apache.pulsar.broker.authorization.AuthorizationService; -import org.apache.pulsar.broker.cache.ConfigurationCacheService; import org.apache.pulsar.broker.resources.PulsarResources; import org.apache.pulsar.common.conf.InternalConfigurationData; import org.apache.pulsar.common.nar.NarClassLoader; @@ -38,7 +37,6 @@ import org.apache.pulsar.functions.worker.service.api.Sinks; 
import org.apache.pulsar.functions.worker.service.api.Sources; import org.apache.pulsar.functions.worker.service.api.Workers; -import org.apache.pulsar.zookeeper.ZooKeeperCache; /** * A worker service with its classloader. diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionActionerTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionActionerTest.java index dc49036a5bebb..7502e247d9908 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionActionerTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionActionerTest.java @@ -28,6 +28,7 @@ import static org.mockito.Mockito.verify; import org.apache.distributedlog.api.namespace.Namespace; +import org.apache.pulsar.client.admin.Packages; import org.apache.pulsar.client.admin.PulsarAdmin; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.pulsar.functions.auth.FunctionAuthProvider; @@ -220,4 +221,52 @@ public void testFunctionAuthDisabled() throws Exception { verify(functionAuthProvider.get(), times(0)).cleanUpAuthData(any(), any()); } + @Test + public void testStartFunctionWithPackageUrl() throws Exception { + + WorkerConfig workerConfig = new WorkerConfig(); + workerConfig.setWorkerId("worker-1"); + workerConfig.setFunctionRuntimeFactoryClassName(ThreadRuntimeFactory.class.getName()); + workerConfig.setFunctionRuntimeFactoryConfigs( + ObjectMapperFactory.getThreadLocal().convertValue( + new ThreadRuntimeFactoryConfig().setThreadGroupName("test"), Map.class)); + workerConfig.setPulsarServiceUrl("pulsar://localhost:6650"); + workerConfig.setStateStorageServiceUrl("foo"); + workerConfig.setFunctionAssignmentTopicName("assignments"); + String downloadDir = this.getClass().getProtectionDomain().getCodeSource().getLocation().getPath(); + workerConfig.setDownloadDirectory(downloadDir); + + RuntimeFactory factory = mock(RuntimeFactory.class); 
+ Runtime runtime = mock(Runtime.class); + doReturn(runtime).when(factory).createContainer(any(), any(), any(), any()); + doNothing().when(runtime).start(); + Namespace dlogNamespace = mock(Namespace.class); + final String exceptionMsg = "dl namespace not-found"; + doThrow(new IllegalArgumentException(exceptionMsg)).when(dlogNamespace).openLog(any()); + PulsarAdmin pulsarAdmin = mock(PulsarAdmin.class); + Packages packages = mock(Packages.class); + doReturn(packages).when(pulsarAdmin).packages(); + doNothing().when(packages).download(any(), any()); + + @SuppressWarnings("resource") + FunctionActioner actioner = new FunctionActioner(workerConfig, factory, dlogNamespace, + new ConnectorsManager(workerConfig), new FunctionsManager(workerConfig), pulsarAdmin); + + // (1) test with file url. functionActioner should be able to consider file-url and it should be able to call + // RuntimeSpawner + String pkgPathLocation = "function://public/default/test-function@latest"; + Function.FunctionMetaData function1 = Function.FunctionMetaData.newBuilder() + .setFunctionDetails(Function.FunctionDetails.newBuilder().setTenant("test-tenant") + .setNamespace("test-namespace").setName("func-1")) + .setPackageLocation(PackageLocationMetaData.newBuilder().setPackagePath(pkgPathLocation).build()) + .build(); + Function.Instance instance = Function.Instance.newBuilder().setFunctionMetaData(function1).setInstanceId(0) + .build(); + FunctionRuntimeInfo functionRuntimeInfo = mock(FunctionRuntimeInfo.class); + doReturn(instance).when(functionRuntimeInfo).getFunctionInstance(); + + actioner.startFunction(functionRuntimeInfo); + verify(runtime, times(1)).start(); + } + } diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionMetaDataManagerTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionMetaDataManagerTest.java index e5221bdd6611b..f7d133db32117 100644 --- 
a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionMetaDataManagerTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionMetaDataManagerTest.java @@ -299,7 +299,7 @@ public void testProcessRequest() throws PulsarClientException, IOException { mockPulsarClient(), ErrorNotifier.getDefaultImpl())); doReturn(true).when(functionMetaDataManager).processUpdate(any(Function.FunctionMetaData.class)); - doReturn(true).when(functionMetaDataManager).proccessDeregister(any(Function.FunctionMetaData.class)); + doReturn(true).when(functionMetaDataManager).processDeregister(any(Function.FunctionMetaData.class)); Request.ServiceRequest serviceRequest = Request.ServiceRequest.newBuilder().setServiceRequestType( @@ -324,9 +324,9 @@ public void testProcessRequest() throws PulsarClientException, IOException { doReturn(serviceRequest.toByteArray()).when(msg).getData(); functionMetaDataManager.processMetaDataTopicMessage(msg); - verify(functionMetaDataManager, times(1)).proccessDeregister( + verify(functionMetaDataManager, times(1)).processDeregister( any(Function.FunctionMetaData.class)); - verify(functionMetaDataManager).proccessDeregister(serviceRequest.getFunctionMetaData()); + verify(functionMetaDataManager).processDeregister(serviceRequest.getFunctionMetaData()); } @Test @@ -393,7 +393,7 @@ public void processDeregister() throws PulsarClientException { .setFunctionDetails(Function.FunctionDetails.newBuilder().setName("func-1") .setNamespace("namespace-1").setTenant("tenant-1")).build(); - Assert.assertFalse(functionMetaDataManager.proccessDeregister(m1)); + Assert.assertFalse(functionMetaDataManager.processDeregister(m1)); verify(functionMetaDataManager, times(0)) .setFunctionMetaData(any(Function.FunctionMetaData.class)); verify(schedulerManager, times(0)).schedule(); @@ -411,7 +411,7 @@ public void processDeregister() throws PulsarClientException { // outdated delete request try { - 
functionMetaDataManager.proccessDeregister(m1); + functionMetaDataManager.processDeregister(m1); Assert.assertTrue(false); } catch (IllegalArgumentException e) { Assert.assertEquals(e.getMessage(), "Delete request ignored because it is out of date. Please try again."); @@ -426,7 +426,7 @@ public void processDeregister() throws PulsarClientException { // delete now m1 = m1.toBuilder().setVersion(2).build(); - Assert.assertTrue(functionMetaDataManager.proccessDeregister(m1)); + Assert.assertTrue(functionMetaDataManager.processDeregister(m1)); verify(functionMetaDataManager, times(1)) .setFunctionMetaData(any(Function.FunctionMetaData.class)); verify(schedulerManager, times(0)).schedule(); diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionRuntimeManagerTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionRuntimeManagerTest.java index 79871db87840d..be832236aacc2 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionRuntimeManagerTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/FunctionRuntimeManagerTest.java @@ -946,7 +946,7 @@ public void testFunctionRuntimeFactoryConfigsBackwardsCompatibility() throws Exc WorkerConfig workerConfig = new WorkerConfig(); workerConfig.setKubernetesContainerFactory(kubernetesContainerFactory); - KubernetesRuntimeFactory mockedKubernetesRuntimeFactory = spy(new KubernetesRuntimeFactory()); + KubernetesRuntimeFactory mockedKubernetesRuntimeFactory = spy(KubernetesRuntimeFactory.class); doNothing().when(mockedKubernetesRuntimeFactory).initialize( any(WorkerConfig.class), any(AuthenticationConfig.class), @@ -1112,7 +1112,7 @@ public void testKubernetesFunctionInstancesRestart() throws Exception { WorkerConfig.KubernetesContainerFactory kubernetesContainerFactory = new WorkerConfig.KubernetesContainerFactory(); 
workerConfig.setKubernetesContainerFactory(kubernetesContainerFactory); - KubernetesRuntimeFactory mockedKubernetesRuntimeFactory = spy(new KubernetesRuntimeFactory()); + KubernetesRuntimeFactory mockedKubernetesRuntimeFactory = spy(KubernetesRuntimeFactory.class); doNothing().when(mockedKubernetesRuntimeFactory).initialize( any(WorkerConfig.class), any(AuthenticationConfig.class), diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/WorkerUtilsTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/WorkerUtilsTest.java index d899db1323748..b2e0f0f354cbc 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/WorkerUtilsTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/WorkerUtilsTest.java @@ -40,8 +40,13 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.fail; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import org.apache.distributedlog.DistributedLogConfiguration; public class WorkerUtilsTest { @@ -99,4 +104,18 @@ public Boolean get() { } } + + @Test + public void testDLogConfiguration() throws URISyntaxException, IOException { + // The config yml is seeded with a fake bookie config. + URL yamlUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); + WorkerConfig config = WorkerConfig.load(yamlUrl.toURI().getPath()); + + // Map the config. + DistributedLogConfiguration dlogConf = WorkerUtils.getDlogConf(config); + + // Verify the outcome. 
+ assertEquals(dlogConf.getString("bkc.testKey"), "fakeValue", + "The bookkeeper client config mapping should apply."); + } } \ No newline at end of file diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImplTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImplTest.java index 4bea15eb4190f..e2bb78484f84b 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImplTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/FunctionsImplTest.java @@ -167,7 +167,7 @@ public void setup() throws Exception { instanceConfig.setMaxBufferedTuples(1024); JavaInstanceRunnable javaInstanceRunnable = new JavaInstanceRunnable( - instanceConfig, null, null, null, null, null, null, null); + instanceConfig, null, null, null, null, null, null, null, null); CompletableFuture metricsDataCompletableFuture = new CompletableFuture(); metricsDataCompletableFuture.complete(javaInstanceRunnable.getMetrics()); Runtime runtime = mock(Runtime.class); @@ -222,7 +222,7 @@ public void testMetricsEmpty() throws PulsarClientException { instanceConfig.setMaxBufferedTuples(1024); JavaInstanceRunnable javaInstanceRunnable = new JavaInstanceRunnable( - instanceConfig, null, null, null, null, null, null, null); + instanceConfig, null, null, null, null, null, null, null, null); CompletableFuture completableFuture = new CompletableFuture(); completableFuture.complete(javaInstanceRunnable.getMetrics()); Runtime runtime = mock(Runtime.class); diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v2/FunctionApiV2ResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v2/FunctionApiV2ResourceTest.java index f37d5df1fe9a0..89fb32adb5941 100644 --- 
a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v2/FunctionApiV2ResourceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v2/FunctionApiV2ResourceTest.java @@ -1031,8 +1031,8 @@ public void testUpdateFunctionWithUrl() throws Exception { URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath(); - String filePackageUrl = "file://" + fileLocation; + String fileLocation = file.getAbsolutePath().replace('\\', '/'); + String filePackageUrl = "file:///" + fileLocation; FunctionConfig functionConfig = new FunctionConfig(); functionConfig.setOutput(outputTopic); @@ -1427,10 +1427,10 @@ public void testDownloadFunctionHttpUrl() throws Exception { public void testDownloadFunctionFile() throws Exception { URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath(); + String fileLocation = file.getAbsolutePath().replace('\\', '/'); String testDir = FunctionApiV2ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); FunctionsImplV2 function = new FunctionsImplV2(() -> mockedWorkerService); - StreamingOutput streamOutput = (StreamingOutput) function.downloadFunction("file://" + fileLocation, null).getEntity(); + StreamingOutput streamOutput = (StreamingOutput) function.downloadFunction("file:///" + fileLocation, null).getEntity(); File pkgFile = new File(testDir, UUID.randomUUID().toString()); OutputStream output = new FileOutputStream(pkgFile); streamOutput.write(output); @@ -1446,8 +1446,8 @@ public void testRegisterFunctionFileUrlWithValidSinkClass() throws Exception { URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = 
file.getAbsolutePath(); - String filePackageUrl = "file://" + fileLocation; + String fileLocation = file.getAbsolutePath().replace('\\', '/'); + String filePackageUrl = "file:///" + fileLocation; when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); FunctionConfig functionConfig = new FunctionConfig(); @@ -1479,8 +1479,8 @@ public void testRegisterFunctionWithConflictingFields() throws Exception { URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath(); - String filePackageUrl = "file://" + fileLocation; + String fileLocation = file.getAbsolutePath().replace('\\', '/'); + String filePackageUrl = "file:///" + fileLocation; when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); when(mockedManager.containsFunction(eq(actualTenant), eq(actualNamespace), eq(actualName))).thenReturn(false); diff --git a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java index d2e7aca1cfa87..837e3211368c9 100644 --- a/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java +++ b/pulsar-functions/worker/src/test/java/org/apache/pulsar/functions/worker/rest/api/v3/FunctionApiV3ResourceTest.java @@ -29,7 +29,6 @@ import static org.powermock.api.mockito.PowerMockito.doThrow; import static org.powermock.api.mockito.PowerMockito.mockStatic; import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertTrue; import com.google.common.collect.Lists; @@ -1525,7 +1524,7 @@ public void testDownloadFunctionHttpUrl() throws Exception { public void testDownloadFunctionFile() throws Exception { URL fileUrl = 
getClass().getClassLoader().getResource("test_worker_config.yml"); File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath(); + String fileLocation = file.getAbsolutePath().replace('\\', '/'); String testDir = FunctionApiV3ResourceTest.class.getProtectionDomain().getCodeSource().getLocation().getPath(); PulsarWorkerService worker = mock(PulsarWorkerService.class); doReturn(true).when(worker).isInitialized(); @@ -1533,7 +1532,7 @@ public void testDownloadFunctionFile() throws Exception { when(config.isAuthorizationEnabled()).thenReturn(false); when(worker.getWorkerConfig()).thenReturn(config); FunctionsImpl function = new FunctionsImpl(() -> worker); - StreamingOutput streamOutput = function.downloadFunction("file://" + fileLocation, null, null); + StreamingOutput streamOutput = function.downloadFunction("file:///" + fileLocation, null, null); File pkgFile = new File(testDir, UUID.randomUUID().toString()); OutputStream output = new FileOutputStream(pkgFile); streamOutput.write(output); @@ -1549,8 +1548,8 @@ public void testRegisterFunctionFileUrlWithValidSinkClass() throws Exception { URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath(); - String filePackageUrl = "file://" + fileLocation; + String fileLocation = file.getAbsolutePath().replace('\\', '/'); + String filePackageUrl = "file:///" + fileLocation; when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); FunctionConfig functionConfig = new FunctionConfig(); @@ -1577,8 +1576,8 @@ public void testRegisterFunctionWithConflictingFields() throws Exception { URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath(); - String filePackageUrl = "file://" + fileLocation; + String fileLocation = 
file.getAbsolutePath().replace('\\', '/'); + String filePackageUrl = "file:///" + fileLocation; when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(true); when(mockedManager.containsFunction(eq(actualTenant), eq(actualNamespace), eq(actualName))).thenReturn(false); @@ -1601,8 +1600,8 @@ public void testCreateFunctionWithoutSettingRuntime() throws Exception { URL fileUrl = getClass().getClassLoader().getResource("test_worker_config.yml"); File file = Paths.get(fileUrl.toURI()).toFile(); - String fileLocation = file.getAbsolutePath(); - String filePackageUrl = "file://" + fileLocation; + String fileLocation = file.getAbsolutePath().replace('\\', '/'); + String filePackageUrl = "file:///" + fileLocation; when(mockedManager.containsFunction(eq(tenant), eq(namespace), eq(function))).thenReturn(false); FunctionConfig functionConfig = new FunctionConfig(); diff --git a/pulsar-io/aerospike/pom.xml b/pulsar-io/aerospike/pom.xml index f93329be2644a..c8fa82a7bca44 100644 --- a/pulsar-io/aerospike/pom.xml +++ b/pulsar-io/aerospike/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-aerospike diff --git a/pulsar-io/aws/pom.xml b/pulsar-io/aws/pom.xml index 512f0c3d0aa2c..a8a7eb3e72da2 100644 --- a/pulsar-io/aws/pom.xml +++ b/pulsar-io/aws/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-aws diff --git a/pulsar-io/batch-data-generator/pom.xml b/pulsar-io/batch-data-generator/pom.xml index 98e6b8c9039fb..640af6d592ee5 100644 --- a/pulsar-io/batch-data-generator/pom.xml +++ b/pulsar-io/batch-data-generator/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-batch-data-generator @@ -44,6 +44,12 @@ ${project.version} + + org.springframework + spring-context + ${spring-context.version} + + io.codearte.jfairy jfairy diff --git a/pulsar-io/batch-discovery-triggerers/pom.xml b/pulsar-io/batch-discovery-triggerers/pom.xml index 
a29cf663be57f..2c78f3d6c8024 100644 --- a/pulsar-io/batch-discovery-triggerers/pom.xml +++ b/pulsar-io/batch-discovery-triggerers/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-batch-discovery-triggerers diff --git a/pulsar-io/canal/pom.xml b/pulsar-io/canal/pom.xml index de7b1f44ac820..1b37233c813d2 100644 --- a/pulsar-io/canal/pom.xml +++ b/pulsar-io/canal/pom.xml @@ -25,13 +25,18 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 pulsar-io-canal Pulsar IO :: Canal + + 5.3.19 + 1.1.5 + + ${project.groupId} @@ -73,4 +78,4 @@ - \ No newline at end of file + diff --git a/pulsar-io/cassandra/pom.xml b/pulsar-io/cassandra/pom.xml index 9a18c5fbc4551..76b6182f75d1d 100644 --- a/pulsar-io/cassandra/pom.xml +++ b/pulsar-io/cassandra/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-cassandra diff --git a/pulsar-io/cassandra/src/main/java/org/apache/pulsar/io/cassandra/CassandraAbstractSink.java b/pulsar-io/cassandra/src/main/java/org/apache/pulsar/io/cassandra/CassandraAbstractSink.java index 874710afb4860..4cd31380f0078 100644 --- a/pulsar-io/cassandra/src/main/java/org/apache/pulsar/io/cassandra/CassandraAbstractSink.java +++ b/pulsar-io/cassandra/src/main/java/org/apache/pulsar/io/cassandra/CassandraAbstractSink.java @@ -98,7 +98,7 @@ private void createClient(String roots) { String[] hostPort = hosts[i].split(":"); b.addContactPoint(hostPort[0]); if (hostPort.length > 1) { - b.withPort(Integer.valueOf(hostPort[1])); + b.withPort(Integer.parseInt(hostPort[1])); } } cluster = b.build(); diff --git a/pulsar-io/common/pom.xml b/pulsar-io/common/pom.xml index 8a6adf9a38a1a..52e601a9cae3c 100644 --- a/pulsar-io/common/pom.xml +++ b/pulsar-io/common/pom.xml @@ -27,7 +27,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-common diff --git a/pulsar-io/common/src/main/java/org/apache/pulsar/io/common/IOConfigUtils.java 
b/pulsar-io/common/src/main/java/org/apache/pulsar/io/common/IOConfigUtils.java index efb72e82567f9..bfa3cfd1cdb7d 100644 --- a/pulsar-io/common/src/main/java/org/apache/pulsar/io/common/IOConfigUtils.java +++ b/pulsar-io/common/src/main/java/org/apache/pulsar/io/common/IOConfigUtils.java @@ -18,18 +18,21 @@ */ package org.apache.pulsar.io.common; +import static org.apache.commons.lang.StringUtils.isBlank; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; -import lombok.extern.slf4j.Slf4j; -import org.apache.pulsar.common.util.Reflections; -import org.apache.pulsar.io.core.SinkContext; -import org.apache.pulsar.io.core.SourceContext; -import org.apache.pulsar.io.core.annotations.FieldDoc; - import java.lang.annotation.Annotation; import java.lang.reflect.Field; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.function.Function; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.common.util.Reflections; +import org.apache.pulsar.io.core.SinkContext; +import org.apache.pulsar.io.core.SourceContext; +import org.apache.pulsar.io.core.annotations.FieldDoc; @Slf4j public class IOConfigUtils { @@ -41,6 +44,15 @@ public static T loadWithSecrets(Map map, Class clazz, Sin return loadWithSecrets(map, clazz, secretName -> sinkContext.getSecret(secretName)); } + public static Map loadConfigFromJsonString(String config) throws JsonProcessingException { + if (!isBlank(config)) { + ObjectMapper mapper = new ObjectMapper(); + return mapper.readValue(config, new TypeReference>() { + }); + } else { + return Collections.emptyMap(); + } + } private static T loadWithSecrets(Map map, Class clazz, Function secretsGetter) { Map configs = new HashMap<>(map); diff --git a/pulsar-io/core/pom.xml b/pulsar-io/core/pom.xml index 250806edda186..269111778289b 100644 --- a/pulsar-io/core/pom.xml +++ b/pulsar-io/core/pom.xml @@ 
-24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-core diff --git a/pulsar-io/data-generator/pom.xml b/pulsar-io/data-generator/pom.xml index 0191c6b671969..b13ce73c364da 100644 --- a/pulsar-io/data-generator/pom.xml +++ b/pulsar-io/data-generator/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-data-generator diff --git a/pulsar-io/debezium/core/pom.xml b/pulsar-io/debezium/core/pom.xml index e004b0f421206..139416214a442 100644 --- a/pulsar-io/debezium/core/pom.xml +++ b/pulsar-io/debezium/core/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io-debezium - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-debezium-core @@ -66,6 +66,12 @@ org.apache.kafka connect-runtime ${kafka-client.version} + + + org.apache.kafka + kafka-log4j-appender + + diff --git a/pulsar-io/debezium/core/src/main/java/org/apache/pulsar/io/debezium/DebeziumSource.java b/pulsar-io/debezium/core/src/main/java/org/apache/pulsar/io/debezium/DebeziumSource.java index b9074b91bc7c5..eeb216b5d9d11 100644 --- a/pulsar-io/debezium/core/src/main/java/org/apache/pulsar/io/debezium/DebeziumSource.java +++ b/pulsar-io/debezium/core/src/main/java/org/apache/pulsar/io/debezium/DebeziumSource.java @@ -18,9 +18,8 @@ */ package org.apache.pulsar.io.debezium; -import io.debezium.relational.history.DatabaseHistory; import java.util.Map; - +import io.debezium.relational.history.DatabaseHistory; import io.debezium.relational.HistorizedRelationalDatabaseConnectorConfig; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.common.naming.TopicName; @@ -50,10 +49,7 @@ public static void throwExceptionIfConfigNotMatch(Map config, } public static void setConfigIfNull(Map config, String key, String value) { - Object orig = config.get(key); - if (orig == null) { - config.put(key, value); - } + config.putIfAbsent(key, value); } // namespace for output topics, default value is "tenant/namespace" @@ -81,9 +77,6 @@ public void open(Map config, 
SourceContext sourceContext) throws // database.history.pulsar.service.url String pulsarUrl = (String) config.get(PulsarDatabaseHistory.SERVICE_URL.name()); - if (StringUtils.isEmpty(pulsarUrl)) { - throw new IllegalArgumentException("Pulsar service URL for History Database not provided."); - } String topicNamespace = topicNamespace(sourceContext); // topic.namespace @@ -97,8 +90,11 @@ public void open(Map config, SourceContext sourceContext) throws setConfigIfNull(config, PulsarKafkaWorkerConfig.OFFSET_STORAGE_TOPIC_CONFIG, topicNamespace + "/" + sourceName + "-" + DEFAULT_OFFSET_TOPIC); - config.put(DatabaseHistory.CONFIGURATION_FIELD_PREFIX_STRING + "pulsar.client.builder", - SerDeUtils.serialize(sourceContext.getPulsarClientBuilder())); + // pass pulsar.client.builder if database.history.pulsar.service.url is not provided + if (StringUtils.isEmpty(pulsarUrl)) { + String pulsarClientBuilder = SerDeUtils.serialize(sourceContext.getPulsarClientBuilder()); + config.put(PulsarDatabaseHistory.CLIENT_BUILDER.name(), pulsarClientBuilder); + } super.open(config, sourceContext); } diff --git a/pulsar-io/debezium/core/src/main/java/org/apache/pulsar/io/debezium/PulsarDatabaseHistory.java b/pulsar-io/debezium/core/src/main/java/org/apache/pulsar/io/debezium/PulsarDatabaseHistory.java index be152a6da8eb2..00a0408873f24 100644 --- a/pulsar-io/debezium/core/src/main/java/org/apache/pulsar/io/debezium/PulsarDatabaseHistory.java +++ b/pulsar-io/debezium/core/src/main/java/org/apache/pulsar/io/debezium/PulsarDatabaseHistory.java @@ -19,6 +19,9 @@ package org.apache.pulsar.io.debezium; import static org.apache.commons.lang.StringUtils.isBlank; +import static org.apache.pulsar.io.common.IOConfigUtils.loadConfigFromJsonString; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.google.common.annotations.VisibleForTesting; import io.debezium.annotation.ThreadSafe; import io.debezium.config.Configuration; import io.debezium.config.Field; @@ -30,6 +33,8 @@ import 
io.debezium.relational.history.HistoryRecord; import io.debezium.relational.history.HistoryRecordComparator; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.UUID; import java.util.function.Consumer; import lombok.extern.slf4j.Slf4j; @@ -77,14 +82,26 @@ public final class PulsarDatabaseHistory extends AbstractDatabaseHistory { .withDescription("Pulsar client builder") .withValidation(Field::isOptional); - public static Field.Set ALL_FIELDS = Field.setOf( + public static final Field READER_CONFIG = Field.create(CONFIGURATION_FIELD_PREFIX_STRING + "pulsar.reader.config") + .withDisplayName("Extra configs of the reader") + .withType(Type.STRING) + .withWidth(Width.LONG) + .withImportance(Importance.HIGH) + .withDescription("The configs of the reader for the database schema history topic, " + + "in the form of a JSON string with key-value pairs") + .withDefault((String) null) + .withValidation(Field::isOptional); + + public static final Field.Set ALL_FIELDS = Field.setOf( TOPIC, SERVICE_URL, CLIENT_BUILDER, - DatabaseHistory.NAME); + DatabaseHistory.NAME, + READER_CONFIG); private final DocumentReader reader = DocumentReader.defaultReader(); private String topicName; + private Map readerConfigMap = new HashMap<>(); private String dbHistoryName; private ClientBuilder clientBuilder; private volatile PulsarClient pulsarClient; @@ -102,13 +119,19 @@ public void configure( + getClass().getSimpleName() + "; check the logs for details"); } this.topicName = config.getString(TOPIC); + try { + this.readerConfigMap = loadConfigFromJsonString(config.getString(READER_CONFIG)); + } catch (JsonProcessingException exception) { + log.warn("The provided reader configs are invalid, " + + "will not passing any extra config to the reader builder.", exception); + } - if (config.getString(CLIENT_BUILDER) == null && config.getString(SERVICE_URL) == null) { + String clientBuilderBase64Encoded = config.getString(CLIENT_BUILDER); + if 
(isBlank(clientBuilderBase64Encoded) && isBlank(config.getString(SERVICE_URL))) { throw new IllegalArgumentException("Neither Pulsar Service URL nor ClientBuilder provided."); } - String clientBuilderBase64Encoded = config.getString(CLIENT_BUILDER); this.clientBuilder = PulsarClient.builder(); - if (null != clientBuilderBase64Encoded) { + if (!isBlank(clientBuilderBase64Encoded)) { // deserialize the client builder to the same classloader this.clientBuilder = (ClientBuilder) SerDeUtils.deserialize(clientBuilderBase64Encoded, this.clientBuilder.getClass().getClassLoader()); } else { @@ -209,11 +232,7 @@ public void stop() { @Override protected void recoverRecords(Consumer records) { setupClientIfNeeded(); - try (Reader historyReader = pulsarClient.newReader(Schema.STRING) - .topic(topicName) - .startMessageId(MessageId.earliest) - .create() - ) { + try (Reader historyReader = createHistoryReader()) { log.info("Scanning the database history topic '{}'", topicName); // Read all messages in the topic ... 
@@ -256,11 +275,7 @@ protected void recoverRecords(Consumer records) { @Override public boolean exists() { setupClientIfNeeded(); - try (Reader historyReader = pulsarClient.newReader(Schema.STRING) - .topic(topicName) - .startMessageId(MessageId.earliest) - .create() - ) { + try (Reader historyReader = createHistoryReader()) { return historyReader.hasMessageAvailable(); } catch (IOException e) { log.error("Encountered issues on checking existence of database history", e); @@ -280,4 +295,13 @@ public String toString() { } return "Pulsar topic"; } + + @VisibleForTesting + Reader createHistoryReader() throws PulsarClientException { + return pulsarClient.newReader(Schema.STRING) + .topic(topicName) + .startMessageId(MessageId.earliest) + .loadConf(readerConfigMap) + .create(); + } } diff --git a/pulsar-io/debezium/core/src/test/java/org/apache/pulsar/io/debezium/PulsarDatabaseHistoryTest.java b/pulsar-io/debezium/core/src/test/java/org/apache/pulsar/io/debezium/PulsarDatabaseHistoryTest.java index 04334da5e435b..081cfdcc5435a 100644 --- a/pulsar-io/debezium/core/src/test/java/org/apache/pulsar/io/debezium/PulsarDatabaseHistoryTest.java +++ b/pulsar-io/debezium/core/src/test/java/org/apache/pulsar/io/debezium/PulsarDatabaseHistoryTest.java @@ -21,6 +21,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import io.debezium.config.Configuration; import io.debezium.connector.mysql.antlr.MySqlAntlrDdlParser; @@ -34,12 +35,14 @@ import java.io.ByteArrayOutputStream; import java.io.ObjectOutputStream; import java.util.Base64; +import java.util.List; import java.util.Map; import org.apache.pulsar.client.api.ClientBuilder; import org.apache.pulsar.client.api.Producer; import org.apache.pulsar.client.api.ProducerConsumerBase; import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.Reader; import 
org.apache.pulsar.client.api.Schema; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; @@ -74,7 +77,7 @@ protected void cleanup() throws Exception { super.internalCleanup(); } - private void testHistoryTopicContent(boolean skipUnparseableDDL, boolean testWithClientBuilder) throws Exception { + private void testHistoryTopicContent(boolean skipUnparseableDDL, boolean testWithClientBuilder, boolean testWithReaderConfig) throws Exception { Configuration.Builder configBuidler = Configuration.create() .with(PulsarDatabaseHistory.TOPIC, topicName) .with(DatabaseHistory.NAME, "my-db-history") @@ -93,6 +96,10 @@ private void testHistoryTopicContent(boolean skipUnparseableDDL, boolean testWit configBuidler.with(PulsarDatabaseHistory.SERVICE_URL, brokerUrl.toString()); } + if (testWithReaderConfig) { + configBuidler.with(PulsarDatabaseHistory.READER_CONFIG, "{\"subscriptionName\":\"my-subscription\"}"); + } + // Start up the history ... history.configure(configBuidler.build(), null, DatabaseHistoryListener.NOOP, true); history.start(); @@ -122,8 +129,8 @@ private void testHistoryTopicContent(boolean skipUnparseableDDL, boolean testWit // Now record schema changes, which writes out to kafka but doesn't actually change the Tables ... setLogPosition(10); ddl = "CREATE TABLE foo ( first VARCHAR(22) NOT NULL ); \n" + - "CREATE TABLE customers ( id INTEGER NOT NULL PRIMARY KEY, name VARCHAR(100) NOT NULL ); \n" + - "CREATE TABLE products ( productId INTEGER NOT NULL PRIMARY KEY, description VARCHAR(255) NOT NULL ); \n"; + "CREATE TABLE customers ( id INTEGER NOT NULL PRIMARY KEY, name VARCHAR(100) NOT NULL ); \n" + + "CREATE TABLE products ( productId INTEGER NOT NULL PRIMARY KEY, description VARCHAR(255) NOT NULL ); \n"; history.record(source, position, "db1", ddl); // Parse the DDL statement 3x and each time update a different Tables object ... 
@@ -181,6 +188,10 @@ private void testHistoryTopicContent(boolean skipUnparseableDDL, boolean testWit assertEquals(recoveredTables, tables3); } + private void testHistoryTopicContent(boolean skipUnparseableDDL, boolean testWithClientBuilder) throws Exception { + testHistoryTopicContent(skipUnparseableDDL, testWithClientBuilder, false); + } + protected void setLogPosition(int index) { this.position = Collect.hashMapOf("filename", "my-txn-file.log", "position", index); @@ -239,4 +250,17 @@ public void testExists() throws Exception { // dummytopic should not exist yet assertFalse(history.exists()); } + + @Test + public void testSubscriptionName() throws Exception { + testHistoryTopicContent(true, false, true); + assertTrue(history.exists()); + try (Reader ignored = history.createHistoryReader()) { + List subscriptions = admin.topics().getSubscriptions(topicName); + assertEquals(subscriptions.size(), 1); + assertTrue(subscriptions.contains("my-subscription")); + } catch (Exception e) { + fail("Failed to create history reader"); + } + } } diff --git a/pulsar-io/debezium/mongodb/pom.xml b/pulsar-io/debezium/mongodb/pom.xml index 7c0f8bcf48f2f..5dada914d85bb 100644 --- a/pulsar-io/debezium/mongodb/pom.xml +++ b/pulsar-io/debezium/mongodb/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io-debezium - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-debezium-mongodb diff --git a/pulsar-io/debezium/mssql/pom.xml b/pulsar-io/debezium/mssql/pom.xml index 911ddac11edc1..afd23135eece4 100644 --- a/pulsar-io/debezium/mssql/pom.xml +++ b/pulsar-io/debezium/mssql/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io-debezium - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-debezium-mssql diff --git a/pulsar-io/debezium/mysql/pom.xml b/pulsar-io/debezium/mysql/pom.xml index f044b53e3e598..f2af3781b3ea9 100644 --- a/pulsar-io/debezium/mysql/pom.xml +++ b/pulsar-io/debezium/mysql/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io-debezium - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-debezium-mysql diff --git 
a/pulsar-io/debezium/oracle/pom.xml b/pulsar-io/debezium/oracle/pom.xml index 950c46a7ff861..56caeb765a832 100644 --- a/pulsar-io/debezium/oracle/pom.xml +++ b/pulsar-io/debezium/oracle/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io-debezium - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-debezium-oracle diff --git a/pulsar-io/debezium/pom.xml b/pulsar-io/debezium/pom.xml index d0fafcb4fa063..b50799ac9d4e1 100644 --- a/pulsar-io/debezium/pom.xml +++ b/pulsar-io/debezium/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-debezium diff --git a/pulsar-io/debezium/postgres/pom.xml b/pulsar-io/debezium/postgres/pom.xml index 69f4c93f8594d..4986fb547a805 100644 --- a/pulsar-io/debezium/postgres/pom.xml +++ b/pulsar-io/debezium/postgres/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io-debezium - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-debezium-postgres @@ -44,6 +44,13 @@ ${debezium.version} + + org.postgresql + postgresql + ${debezium.postgresql.version} + runtime + + diff --git a/pulsar-io/docs/pom.xml b/pulsar-io/docs/pom.xml index 03b5114883283..a70335b402955 100644 --- a/pulsar-io/docs/pom.xml +++ b/pulsar-io/docs/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-docs diff --git a/pulsar-io/dynamodb/pom.xml b/pulsar-io/dynamodb/pom.xml index 5737b50cab58e..67d4450ff2729 100644 --- a/pulsar-io/dynamodb/pom.xml +++ b/pulsar-io/dynamodb/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-dynamodb diff --git a/pulsar-io/elastic-search/pom.xml b/pulsar-io/elastic-search/pom.xml index 43a82c6f07071..544119ae259e0 100644 --- a/pulsar-io/elastic-search/pom.xml +++ b/pulsar-io/elastic-search/pom.xml @@ -23,7 +23,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-elastic-search Pulsar IO :: ElasticSearch @@ -52,7 +52,6 @@ com.fasterxml.jackson.core jackson-databind - ${jackson.databind.version} diff --git 
a/pulsar-io/elastic-search/src/main/java/org/apache/pulsar/io/elasticsearch/ElasticSearchClient.java b/pulsar-io/elastic-search/src/main/java/org/apache/pulsar/io/elasticsearch/ElasticSearchClient.java index af66795ef6ab5..b9e756e24d30c 100644 --- a/pulsar-io/elastic-search/src/main/java/org/apache/pulsar/io/elasticsearch/ElasticSearchClient.java +++ b/pulsar-io/elastic-search/src/main/java/org/apache/pulsar/io/elasticsearch/ElasticSearchClient.java @@ -254,16 +254,27 @@ void hasIrrecoverableError(BulkItemResponse bulkItemResponse) throws Exception { } } + IndexRequest makeIndexRequest(Record record, Pair idAndDoc) throws IOException { + IndexRequest indexRequest = Requests.indexRequest(indexName(record.getTopicName())); + if (!Strings.isNullOrEmpty(idAndDoc.getLeft())) + indexRequest.id(idAndDoc.getLeft()); + indexRequest.type(config.getTypeName()); + indexRequest.source(idAndDoc.getRight(), XContentType.JSON); + return indexRequest; + } + + DeleteRequest makeDeleteRequest(Record record, String id) throws IOException { + DeleteRequest deleteRequest = Requests.deleteRequest(indexName(record.getTopicName())); + deleteRequest.id(id); + deleteRequest.type(config.getTypeName()); + return deleteRequest; + } + public void bulkIndex(Record record, Pair idAndDoc) throws Exception { try { checkNotFailed(); checkIndexExists(record.getTopicName()); - IndexRequest indexRequest = Requests.indexRequest(config.getIndexName()); - if (!Strings.isNullOrEmpty(idAndDoc.getLeft())) - indexRequest.id(idAndDoc.getLeft()); - indexRequest.type(config.getTypeName()); - indexRequest.source(idAndDoc.getRight(), XContentType.JSON); - + IndexRequest indexRequest = makeIndexRequest(record, idAndDoc); records.put(indexRequest, record); bulkProcessor.add(indexRequest); } catch(Exception e) { @@ -284,12 +295,7 @@ public boolean indexDocument(Record record, Pair try { checkNotFailed(); checkIndexExists(record.getTopicName()); - IndexRequest indexRequest = 
Requests.indexRequest(config.getIndexName()); - if (!Strings.isNullOrEmpty(idAndDoc.getLeft())) - indexRequest.id(idAndDoc.getLeft()); - indexRequest.type(config.getTypeName()); - indexRequest.source(idAndDoc.getRight(), XContentType.JSON); - IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); + IndexResponse indexResponse = client.index(makeIndexRequest(record, idAndDoc), RequestOptions.DEFAULT); if (indexResponse.getResult().equals(DocWriteResponse.Result.CREATED) || indexResponse.getResult().equals(DocWriteResponse.Result.UPDATED)) { record.ack(); @@ -309,10 +315,7 @@ public void bulkDelete(Record record, String id) throws Exception try { checkNotFailed(); checkIndexExists(record.getTopicName()); - DeleteRequest deleteRequest = Requests.deleteRequest(config.getIndexName()); - deleteRequest.id(id); - deleteRequest.type(config.getTypeName()); - + DeleteRequest deleteRequest = makeDeleteRequest(record, id); records.put(deleteRequest, record); bulkProcessor.add(deleteRequest); } catch(Exception e) { @@ -333,10 +336,7 @@ public boolean deleteDocument(Record record, String id) throws Ex try { checkNotFailed(); checkIndexExists(record.getTopicName()); - DeleteRequest deleteRequest = Requests.deleteRequest(config.getIndexName()); - deleteRequest.id(id); - deleteRequest.type(config.getTypeName()); - DeleteResponse deleteResponse = client.delete(deleteRequest, RequestOptions.DEFAULT); + DeleteResponse deleteResponse = client.delete(makeDeleteRequest(record, id), RequestOptions.DEFAULT); log.debug("delete result=" + deleteResponse.getResult()); if (deleteResponse.getResult().equals(DocWriteResponse.Result.DELETED) || deleteResponse.getResult().equals(DocWriteResponse.Result.NOT_FOUND)) { diff --git a/pulsar-io/elastic-search/src/main/java/org/apache/pulsar/io/elasticsearch/ElasticSearchConfig.java b/pulsar-io/elastic-search/src/main/java/org/apache/pulsar/io/elasticsearch/ElasticSearchConfig.java index 7dbfd0388a4b4..dc6d0d415a047 100644 --- 
a/pulsar-io/elastic-search/src/main/java/org/apache/pulsar/io/elasticsearch/ElasticSearchConfig.java +++ b/pulsar-io/elastic-search/src/main/java/org/apache/pulsar/io/elasticsearch/ElasticSearchConfig.java @@ -79,14 +79,14 @@ public class ElasticSearchConfig implements Serializable { @FieldDoc( required = false, - defaultValue = "true", + defaultValue = "false", help = "Create the index if it does not exist" ) private boolean createIndexIfNeeded = false; @FieldDoc( required = false, - defaultValue = "1", + defaultValue = "0", help = "The number of replicas of the index" ) private int indexNumberOfReplicas = 0; @@ -109,7 +109,7 @@ public class ElasticSearchConfig implements Serializable { @FieldDoc( required = false, - defaultValue = "-1", + defaultValue = "1", help = "The maximum number of retries for elasticsearch requests. Use -1 to disable it." ) private int maxRetries = 1; @@ -216,7 +216,7 @@ public class ElasticSearchConfig implements Serializable { @FieldDoc( required = false, - defaultValue = "id", + defaultValue = "", help = "The comma separated ordered list of field names used to build the Elasticsearch document _id from the record value. If this list is a singleton, the field is converted as a string. If this list has 2 or more fields, the generated _id is a string representation of a JSON array of the field values." 
) private String primaryFields = ""; diff --git a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchBWCTests.java b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchBWCTests.java index 7b24a7ccb7494..86b9df3229f50 100644 --- a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchBWCTests.java +++ b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchBWCTests.java @@ -23,7 +23,7 @@ import org.apache.pulsar.client.api.schema.GenericObject; import org.apache.pulsar.common.schema.SchemaType; import org.apache.pulsar.functions.api.Record; -import org.junit.Test; +import org.testng.annotations.Test; import java.nio.charset.StandardCharsets; diff --git a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchClientSslTests.java b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchClientSslTests.java index 9f878aad1ad17..325f69c7f73f9 100644 --- a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchClientSslTests.java +++ b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchClientSslTests.java @@ -25,17 +25,12 @@ import java.io.IOException; import java.time.Duration; -import java.util.Optional; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; // see https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html#ssl-tls-settings -public class ElasticSearchClientSslTests { - - public static final String ELASTICSEARCH_IMAGE = Optional.ofNullable(System.getenv("ELASTICSEARCH_IMAGE")) - .orElse("docker.elastic.co/elasticsearch/elasticsearch:7.10.2-amd64"); +public class ElasticSearchClientSslTests 
extends ElasticSearchTestBase { final static String INDEX = "myindex"; @@ -44,7 +39,7 @@ public class ElasticSearchClientSslTests { @Test public void testSslBasic() throws IOException { - try(ElasticsearchContainer container = new ElasticsearchContainer(ELASTICSEARCH_IMAGE) + try (ElasticsearchContainer container = createElasticsearchContainer() .withCreateContainerCmdModifier(c -> c.withName("elasticsearch")) .withFileSystemBind(sslResourceDir, configDir + "/ssl") .withEnv("ELASTIC_PASSWORD","elastic") // boostrap password @@ -80,7 +75,7 @@ public void testSslBasic() throws IOException { @Test public void testSslWithHostnameVerification() throws IOException { - try(ElasticsearchContainer container = new ElasticsearchContainer(ELASTICSEARCH_IMAGE) + try (ElasticsearchContainer container = createElasticsearchContainer() .withCreateContainerCmdModifier(c -> c.withName("elasticsearch")) .withFileSystemBind(sslResourceDir, configDir + "/ssl") .withEnv("ELASTIC_PASSWORD","elastic") // boostrap password @@ -119,7 +114,7 @@ public void testSslWithHostnameVerification() throws IOException { @Test public void testSslWithClientAuth() throws IOException { - try(ElasticsearchContainer container = new ElasticsearchContainer(ELASTICSEARCH_IMAGE) + try(ElasticsearchContainer container = createElasticsearchContainer() .withCreateContainerCmdModifier(c -> c.withName("elasticsearch")) .withFileSystemBind(sslResourceDir, configDir + "/ssl") .withEnv("ELASTIC_PASSWORD","elastic") // boostrap password @@ -157,7 +152,7 @@ public void testSslWithClientAuth() throws IOException { } - public void testIndexExists(ElasticSearchClient client) throws IOException { + private void testIndexExists(ElasticSearchClient client) throws IOException { assertFalse(client.indexExists("mynewindex")); assertTrue(client.createIndexIfNeeded("mynewindex")); assertTrue(client.indexExists("mynewindex")); diff --git 
a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchClientTests.java b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchClientTests.java index aeacaf85ff1f6..1a505dae31af2 100644 --- a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchClientTests.java +++ b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchClientTests.java @@ -24,8 +24,11 @@ import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.io.elasticsearch.testcontainers.ChaosContainer; import org.awaitility.Awaitility; -import org.junit.AfterClass; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.mockito.Mockito; import org.testcontainers.elasticsearch.ElasticsearchContainer; +import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; @@ -33,27 +36,28 @@ import java.util.Optional; import java.util.UUID; -import static org.junit.Assert.*; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.when; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.*; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertThrows; +import static org.testng.Assert.assertTrue; @Slf4j -public class ElasticSearchClientTests { - - public static final String ELASTICSEARCH_IMAGE = Optional.ofNullable(System.getenv("ELASTICSEARCH_IMAGE")) - .orElse("docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2-amd64"); +public class ElasticSearchClientTests extends ElasticSearchTestBase { static ElasticsearchContainer container; @BeforeClass public static final void initBeforeClass() throws IOException { 
- container = new ElasticsearchContainer(ELASTICSEARCH_IMAGE); + container = createElasticsearchContainer(); container.start(); } - @AfterClass + @AfterClass(alwaysRun = true) public static void closeAfterClass() { container.close(); } @@ -78,6 +82,44 @@ public void fail() { } } + @Test + public void testIndexRequest() throws Exception { + String index = "myindex-" + UUID.randomUUID(); + Record record = Mockito.mock(Record.class); + String topicName = "topic-" + UUID.randomUUID(); + when(record.getTopicName()).thenReturn(Optional.of(topicName)); + try (ElasticSearchClient client = new ElasticSearchClient(new ElasticSearchConfig() + .setElasticSearchUrl("http://" + container.getHttpHostAddress()) + .setIndexName(index))) { + IndexRequest request = client.makeIndexRequest(record, Pair.of("1", "{ \"a\":1}")); + assertEquals(request.index(), index); + } + try (ElasticSearchClient client = new ElasticSearchClient(new ElasticSearchConfig() + .setElasticSearchUrl("http://" + container.getHttpHostAddress()))) { + IndexRequest request = client.makeIndexRequest(record, Pair.of("1", "{ \"a\":1}")); + assertEquals(request.index(), topicName); + } + } + + @Test + public void testDeleteRequest() throws Exception { + String index = "myindex-" + UUID.randomUUID(); + Record record = Mockito.mock(Record.class); + String topicName = "topic-" + UUID.randomUUID(); + when(record.getTopicName()).thenReturn(Optional.of(topicName)); + try (ElasticSearchClient client = new ElasticSearchClient(new ElasticSearchConfig() + .setElasticSearchUrl("http://" + container.getHttpHostAddress()) + .setIndexName(index))) { + DeleteRequest request = client.makeDeleteRequest(record, "1"); + assertEquals(request.index(), index); + } + try (ElasticSearchClient client = new ElasticSearchClient(new ElasticSearchConfig() + .setElasticSearchUrl("http://" + container.getHttpHostAddress()))) { + DeleteRequest request = client.makeDeleteRequest(record, "1"); + assertEquals(request.index(), topicName); + } + } + 
@Test public void testIndexDelete() throws Exception { String index = "myindex-" + UUID.randomUUID(); @@ -201,7 +243,7 @@ public void testBulkRetry() throws Exception { assertEquals(mockRecord.failed, 0); assertEquals(client.totalHits(index), 2); - ChaosContainer chaosContainer = new ChaosContainer<>(container.getContainerName(), "15s"); + ChaosContainer chaosContainer = ChaosContainer.pauseContainerForSeconds(container.getContainerName(), 15); chaosContainer.start(); client.bulkIndex(mockRecord, Pair.of("3", "{\"a\":3}")); @@ -248,12 +290,12 @@ public void testBulkBlocking() throws Exception { }); client.flush(); Awaitility.await().untilAsserted(() -> { - assertEquals(mockRecord.acked, 5); assertEquals(mockRecord.failed, 0); + assertEquals(mockRecord.acked, 5); assertEquals(client.totalHits(index), 5); }); - ChaosContainer chaosContainer = new ChaosContainer<>(container.getContainerName(), "30s"); + ChaosContainer chaosContainer = ChaosContainer.pauseContainerForSeconds(container.getContainerName(), 30); chaosContainer.start(); Thread.sleep(1000L); diff --git a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchExtractTests.java b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchExtractTests.java index 89d686c6edd82..b0d26d92d53b7 100644 --- a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchExtractTests.java +++ b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchExtractTests.java @@ -18,8 +18,10 @@ */ package org.apache.pulsar.io.elasticsearch; -import com.google.common.collect.ImmutableList; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; import com.google.common.collect.ImmutableMap; +import java.util.Optional; import org.apache.commons.lang3.tuple.Pair; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.schema.GenericObject; @@ -31,33 +33,18 
@@ import org.apache.pulsar.common.schema.KeyValueEncodingType; import org.apache.pulsar.common.schema.SchemaType; import org.apache.pulsar.functions.api.Record; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; -import java.util.Collection; -import java.util.Optional; - -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertNull; - -@RunWith(Parameterized.class) public class ElasticSearchExtractTests { - SchemaType schemaType; - - @Parameters - public static Collection schemaTypes() { - return ImmutableList.of(SchemaType.JSON, SchemaType.AVRO); - } - - public ElasticSearchExtractTests(SchemaType schemaType) { - this.schemaType = schemaType; + @DataProvider(name = "schemaType") + public Object[] schemaType() { + return new Object[]{SchemaType.JSON, SchemaType.AVRO}; } - @Test - public void testGenericRecord() throws Exception { + @Test(dataProvider = "schemaType") + public void testGenericRecord(SchemaType schemaType) throws Exception { RecordSchemaBuilder valueSchemaBuilder = org.apache.pulsar.client.api.schema.SchemaBuilder.record("value"); valueSchemaBuilder.field("c").type(SchemaType.STRING).optional().defaultValue(null); valueSchemaBuilder.field("d").type(SchemaType.INT32).optional().defaultValue(null); @@ -154,8 +141,8 @@ public GenericObject getValue() { assertNull(pair4.getRight()); } - @Test - public void testKeyValueGenericRecord() throws Exception { + @Test(dataProvider = "schemaType") + public void testKeyValueGenericRecord(SchemaType schemaType) throws Exception { RecordSchemaBuilder keySchemaBuilder = org.apache.pulsar.client.api.schema.SchemaBuilder.record("key"); keySchemaBuilder.field("a").type(SchemaType.STRING).optional().defaultValue(null); keySchemaBuilder.field("b").type(SchemaType.INT32).optional().defaultValue(null); @@ -273,4 
+260,4 @@ public Object getNativeObject() { assertEquals(pair3.getLeft(), "[\"1\",1]"); assertNull(pair3.getRight()); } -} +} \ No newline at end of file diff --git a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchSinkRawDataTests.java b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchSinkRawDataTests.java index f195a383d4073..018a11e5aa091 100644 --- a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchSinkRawDataTests.java +++ b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchSinkRawDataTests.java @@ -23,11 +23,11 @@ import org.apache.pulsar.client.api.schema.GenericObject; import org.apache.pulsar.functions.api.Record; import org.apache.pulsar.io.core.SinkContext; -import org.junit.AfterClass; import org.mockito.Mock; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import org.testcontainers.elasticsearch.ElasticsearchContainer; +import org.testng.annotations.AfterClass; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeClass; import org.testng.annotations.BeforeMethod; @@ -42,13 +42,8 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertNull; -public class ElasticSearchSinkRawDataTests { - - public static final String ELASTICSEARCH_IMAGE = Optional.ofNullable(System.getenv("ELASTICSEARCH_IMAGE")) - .orElse("docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2-amd64"); +public class ElasticSearchSinkRawDataTests extends ElasticSearchTestBase { private static ElasticsearchContainer container; @@ -67,11 +62,11 @@ public class ElasticSearchSinkRawDataTests { @BeforeClass public static final void initBeforeClass() { - container = new ElasticsearchContainer(ELASTICSEARCH_IMAGE); + container = 
createElasticsearchContainer(); schema = Schema.BYTES; } - @AfterClass + @AfterClass(alwaysRun = true) public static void closeAfterClass() { container.close(); } diff --git a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchSinkTests.java b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchSinkTests.java index 9e3363c84d76c..c8c500a3bb377 100644 --- a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchSinkTests.java +++ b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchSinkTests.java @@ -54,10 +54,7 @@ import java.util.Locale; import static org.testng.Assert.assertNull; -public class ElasticSearchSinkTests { - - public static final String ELASTICSEARCH_IMAGE = Optional.ofNullable(System.getenv("ELASTICSEARCH_IMAGE")) - .orElse("docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2-amd64"); +public class ElasticSearchSinkTests extends ElasticSearchTestBase { private static ElasticsearchContainer container; @@ -76,7 +73,7 @@ public class ElasticSearchSinkTests { @BeforeClass public static final void initBeforeClass() { - container = new ElasticsearchContainer(ELASTICSEARCH_IMAGE); + container = createElasticsearchContainer(); valueSchema = Schema.JSON(UserProfile.class); genericSchema = Schema.generic(valueSchema.getSchemaInfo()); @@ -89,7 +86,7 @@ public static final void initBeforeClass() { } - @AfterClass + @AfterClass(alwaysRun = true) public static void closeAfterClass() { container.close(); } @@ -292,7 +289,7 @@ public void testNullValueDelete() throws Exception { testNullValue(ElasticSearchConfig.NullValueAction.DELETE); } - public void testNullValue(ElasticSearchConfig.NullValueAction action) throws Exception { + private void testNullValue(ElasticSearchConfig.NullValueAction action) throws Exception { String index = "testnullvalue" + action.toString().toLowerCase(Locale.ROOT); map.put("indexName", index); 
map.put("keyIgnore", "false"); diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/UnsupportedTxnActionException.java b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchTestBase.java similarity index 58% rename from pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/UnsupportedTxnActionException.java rename to pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchTestBase.java index e4a83410c0159..34e3fc21abb40 100644 --- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/transaction/buffer/exceptions/UnsupportedTxnActionException.java +++ b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/ElasticSearchTestBase.java @@ -16,19 +16,21 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.pulsar.broker.transaction.buffer.exceptions; +package org.apache.pulsar.io.elasticsearch; -import org.apache.pulsar.client.api.transaction.TxnID; -import org.apache.pulsar.common.api.proto.TxnAction; +import org.testcontainers.elasticsearch.ElasticsearchContainer; -/** - * Exceptions are thrown when txnAction is unsupported. 
- */ -public class UnsupportedTxnActionException extends TransactionBufferException { +import java.util.Optional; + +public class ElasticSearchTestBase { - private static final long serialVersionUID = 0L; + private static final String ELASTICSEARCH_IMAGE = Optional.ofNullable(System.getenv("ELASTICSEARCH_IMAGE")) + .orElse("docker.elastic.co/elasticsearch/elasticsearch:7.16.3-amd64"); + + protected static ElasticsearchContainer createElasticsearchContainer() { + return new ElasticsearchContainer(ELASTICSEARCH_IMAGE) + .withEnv("ES_JAVA_OPTS", "-Xms128m -Xmx256m"); - public UnsupportedTxnActionException(TxnID txnId, int txnAction) { - super("Transaction `" + txnId + "` receive unsupported txnAction " + TxnAction.valueOf(txnAction)); } + } diff --git a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/testcontainers/ChaosContainer.java b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/testcontainers/ChaosContainer.java index 7e7734f020c3e..4b296bb10137d 100644 --- a/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/testcontainers/ChaosContainer.java +++ b/pulsar-io/elastic-search/src/test/java/org/apache/pulsar/io/elasticsearch/testcontainers/ChaosContainer.java @@ -19,26 +19,63 @@ package org.apache.pulsar.io.elasticsearch.testcontainers; import lombok.extern.slf4j.Slf4j; +import org.awaitility.Awaitility; import org.testcontainers.containers.BindMode; import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.containers.wait.strategy.WaitStrategy; +import java.util.ArrayList; +import java.util.List; import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; +import java.util.function.Predicate; // see https://github.com/alexei-led/pumba @Slf4j public class ChaosContainer> extends GenericContainer { - public static final String PUMBA_IMAGE = 
Optional.ofNullable(System.getenv("PUMBA_IMAGE")) - .orElse("gaiaadm/pumba:latest"); - - public ChaosContainer(String targetContainer, String pause) { - super(PUMBA_IMAGE); - setCommand("--log-level info --interval 60s pause --duration " + pause + " " + targetContainer); - addFileSystemBind("/var/run/docker.sock", "/var/run/docker.sock", BindMode.READ_WRITE); - setWaitStrategy(Wait.forLogMessage(".*pausing container.*", 1)); - withLogConsumer(o -> { - log.info("pumba> {}", o.getUtf8String()); - }); - } + public static final String PUMBA_IMAGE = Optional.ofNullable(System.getenv("PUMBA_IMAGE")) + .orElse("gaiaadm/pumba:0.8.0"); + + private final List logs = new ArrayList<>(); + private Consumer beforeStop; + + public static ChaosContainer pauseContainerForSeconds(String targetContainer, int seconds) { + return new ChaosContainer(targetContainer, "pause --duration " + seconds + "s", Wait.forLogMessage(".*pausing container.*", 1), + (Consumer) chaosContainer -> Awaitility + .await() + .atMost(seconds + 5, TimeUnit.SECONDS) + .until(() -> { + boolean found = chaosContainer.logs.stream().anyMatch((Predicate) line -> line.contains("stop pausing container")); + if (!found) { + log.debug("ChaosContainer stop requested. 
waiting for \"stop pausing container\" log"); + log.debug(String.join("\n", chaosContainer.logs)); + } + return found; + } + )); + } + + private ChaosContainer(String targetContainer, String command, WaitStrategy waitStrategy, Consumer beforeStop) { + super(PUMBA_IMAGE); + setCommand("--log-level info " + command + " " + targetContainer); + addFileSystemBind("/var/run/docker.sock", "/var/run/docker.sock", BindMode.READ_WRITE); + setWaitStrategy(waitStrategy); + withLogConsumer(o -> { + final String string = o.getUtf8String(); + log.info("pumba> {}", string); + logs.add(string); + }); + this.beforeStop = beforeStop; + } + + @Override + public void stop() { + if (getContainerId() != null && beforeStop != null) { + beforeStop.accept(this); + } + super.stop(); + } } \ No newline at end of file diff --git a/pulsar-io/file/pom.xml b/pulsar-io/file/pom.xml index 8b1af8083c1ac..25e2af6268cef 100644 --- a/pulsar-io/file/pom.xml +++ b/pulsar-io/file/pom.xml @@ -23,7 +23,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-file diff --git a/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileListingThread.java b/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileListingThread.java index a13b923c25c47..4067141bf8bdc 100644 --- a/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileListingThread.java +++ b/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileListingThread.java @@ -106,10 +106,10 @@ public void run() { private Set performListing(final File directory, final FileFilter filter, final boolean recurseSubdirectories) { Path p = directory.toPath(); - if (!Files.isWritable(p) || !Files.isReadable(p)) { - throw new IllegalStateException("Directory '" + directory - + "' does not have sufficient permissions (i.e., not writable and readable)"); + if (!Files.isReadable(p)) { + throw new IllegalStateException("Cannot read directory: '" + directory); } + final Set queue = new HashSet<>(); if (!directory.exists()) { return queue; diff 
--git a/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileSource.java b/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileSource.java index bc09c978dd621..3a51736cc2a36 100644 --- a/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileSource.java +++ b/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileSource.java @@ -58,13 +58,15 @@ public void open(Map config, SourceContext sourceContext) throws @Override public void close() throws Exception { - executor.shutdown(); - try { - if (!executor.awaitTermination(800, TimeUnit.MILLISECONDS)) { + if (executor != null) { + executor.shutdown(); + try { + if (!executor.awaitTermination(800, TimeUnit.MILLISECONDS)) { + executor.shutdownNow(); + } + } catch (InterruptedException e) { executor.shutdownNow(); } - } catch (InterruptedException e) { - executor.shutdownNow(); } } } diff --git a/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileSourceConfig.java b/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileSourceConfig.java index 24835e0e3141b..a43afc44c7992 100644 --- a/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileSourceConfig.java +++ b/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileSourceConfig.java @@ -129,7 +129,7 @@ public void validate() { throw new IllegalArgumentException("Specified input directory does not exist"); } else if (!Files.isReadable(Paths.get(inputDirectory))) { throw new IllegalArgumentException("Specified input directory is not readable"); - } else if (Optional.ofNullable(keepFile).orElse(false) && !Files.isWritable(Paths.get(inputDirectory))) { + } else if (!Optional.ofNullable(keepFile).orElse(false) && !Files.isWritable(Paths.get(inputDirectory))) { throw new IllegalArgumentException("You have requested the consumed files to be deleted, but the " + "source directory is not writeable."); } @@ -166,4 +166,4 @@ public void validate() { throw new IllegalArgumentException("The property numWorkers must be greater than zero"); } } 
-} \ No newline at end of file +} diff --git a/pulsar-io/file/src/test/java/org/apache/pulsar/io/file/FileSourceConfigTests.java b/pulsar-io/file/src/test/java/org/apache/pulsar/io/file/FileSourceConfigTests.java index 64144e667adeb..4a4d8d2a86713 100644 --- a/pulsar-io/file/src/test/java/org/apache/pulsar/io/file/FileSourceConfigTests.java +++ b/pulsar-io/file/src/test/java/org/apache/pulsar/io/file/FileSourceConfigTests.java @@ -18,7 +18,9 @@ */ package org.apache.pulsar.io.file; +import static org.junit.Assert.assertFalse; import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; import java.io.File; import java.io.IOException; @@ -29,6 +31,8 @@ public class FileSourceConfigTests { + private final static String INPUT_DIRECTORY = "/dev/null"; + @Test public final void loadFromYamlFileTest() throws IOException { File yamlFile = getFile("sinkConfig.yaml"); @@ -39,7 +43,7 @@ public final void loadFromYamlFileTest() throws IOException { @Test public final void loadFromMapTest() throws IOException { Map map = new HashMap (); - map.put("inputDirectory", "/tmp"); + map.put("inputDirectory", INPUT_DIRECTORY); map.put("keepFile", false); FileSourceConfig config = FileSourceConfig.load(map); @@ -49,7 +53,7 @@ public final void loadFromMapTest() throws IOException { @Test public final void validValidateTest() throws IOException { Map map = new HashMap (); - map.put("inputDirectory", "/tmp"); + map.put("inputDirectory", INPUT_DIRECTORY); FileSourceConfig config = FileSourceConfig.load(map); assertNotNull(config); @@ -70,7 +74,7 @@ public final void missingRequiredPropertiesTest() throws IOException { @Test(expectedExceptions = com.fasterxml.jackson.databind.exc.InvalidFormatException.class) public final void InvalidBooleanPropertyTest() throws IOException { Map map = new HashMap (); - map.put("inputDirectory", "/"); + map.put("inputDirectory", INPUT_DIRECTORY); map.put("recurse", "not a boolean"); FileSourceConfig config = 
FileSourceConfig.load(map); @@ -82,7 +86,7 @@ public final void InvalidBooleanPropertyTest() throws IOException { expectedExceptionsMessageRegExp = "The property pollingInterval must be greater than zero") public final void ZeroValueTest() throws IOException { Map map = new HashMap (); - map.put("inputDirectory", "/"); + map.put("inputDirectory", INPUT_DIRECTORY); map.put("pollingInterval", 0); FileSourceConfig config = FileSourceConfig.load(map); @@ -94,7 +98,7 @@ public final void ZeroValueTest() throws IOException { expectedExceptionsMessageRegExp = "The property minimumFileAge must be non-negative") public final void NegativeValueTest() throws IOException { Map map = new HashMap (); - map.put("inputDirectory", "/"); + map.put("inputDirectory", INPUT_DIRECTORY); map.put("minimumFileAge", "-50"); FileSourceConfig config = FileSourceConfig.load(map); @@ -106,14 +110,40 @@ public final void NegativeValueTest() throws IOException { expectedExceptionsMessageRegExp = "Invalid Regex pattern provided for fileFilter") public final void invalidFileFilterTest() throws IOException { Map map = new HashMap (); - map.put("inputDirectory", "/"); + map.put("inputDirectory", INPUT_DIRECTORY); map.put("fileFilter", "\\"); // Results in a single '\' being sent. 
FileSourceConfig config = FileSourceConfig.load(map); assertNotNull(config); config.validate(); } - + + @Test + public final void keepFileTest() throws IOException { + Map map = new HashMap (); + map.put("inputDirectory", "/"); // root directory that we cannot write to + map.put("keepFile", "true"); // even though no write permission on "/", we should still be able to read + + FileSourceConfig config = FileSourceConfig.load(map); + assertNotNull(config); + assertTrue(config.getKeepFile()); + config.validate(); + } + + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "You have requested the consumed files to be deleted, " + + "but the source directory is not writeable.") + public final void invalidKeepFileTest() throws IOException { + Map map = new HashMap (); + map.put("inputDirectory", "/"); // root directory that we cannot write to + map.put("keepFile", "false"); + + FileSourceConfig config = FileSourceConfig.load(map); + assertNotNull(config); + assertFalse(config.getKeepFile()); + config.validate(); + } + private File getFile(String name) { ClassLoader classLoader = getClass().getClassLoader(); return new File(classLoader.getResource(name).getFile()); diff --git a/pulsar-io/flume/pom.xml b/pulsar-io/flume/pom.xml index 649803a8e0e03..d4606db742e63 100644 --- a/pulsar-io/flume/pom.xml +++ b/pulsar-io/flume/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-flume diff --git a/pulsar-io/flume/src/main/java/org/apache/pulsar/io/flume/FlumeConnector.java b/pulsar-io/flume/src/main/java/org/apache/pulsar/io/flume/FlumeConnector.java index 29a98478533ed..66eb2d61094da 100644 --- a/pulsar-io/flume/src/main/java/org/apache/pulsar/io/flume/FlumeConnector.java +++ b/pulsar-io/flume/src/main/java/org/apache/pulsar/io/flume/FlumeConnector.java @@ -43,10 +43,7 @@ public void StartConnector(FlumeConfig flumeConfig) throws Exception { SSLUtil.initGlobalSSLParameters(); String agentName = 
flumeConfig.getName(); boolean reload = !flumeConfig.getNoReloadConf(); - boolean isZkConfigured = false; - if (flumeConfig.getZkConnString().length() > 0) { - isZkConfigured = true; - } + boolean isZkConfigured = flumeConfig.getZkConnString().length() > 0; if (isZkConfigured) { // get options String zkConnectionStr = flumeConfig.getZkConnString(); diff --git a/pulsar-io/flume/src/test/java/org/apache/pulsar/io/flume/node/TestEnvVarResolverProperties.java b/pulsar-io/flume/src/test/java/org/apache/pulsar/io/flume/node/TestEnvVarResolverProperties.java index 4440acf2f1250..adfe9142d71c1 100644 --- a/pulsar-io/flume/src/test/java/org/apache/pulsar/io/flume/node/TestEnvVarResolverProperties.java +++ b/pulsar-io/flume/src/test/java/org/apache/pulsar/io/flume/node/TestEnvVarResolverProperties.java @@ -18,15 +18,16 @@ */ package org.apache.pulsar.io.flume.node; +import static org.testng.Assert.assertEquals; +import com.github.stefanbirkner.systemlambda.SystemLambda; import java.io.File; -import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.contrib.java.lang.system.EnvironmentVariables; public final class TestEnvVarResolverProperties { - private static final File TESTFILE = new File( + private static final File TEST_FILE = new File( TestEnvVarResolverProperties.class.getClassLoader() .getResource("flume-conf-with-envvars.properties").getFile()); @@ -36,34 +37,36 @@ public final class TestEnvVarResolverProperties { @Before public void setUp() { - provider = new PropertiesFileConfigurationProvider("a1", TESTFILE); + provider = new PropertiesFileConfigurationProvider("a1", TEST_FILE); } @Test - public void resolveEnvVar() { - environmentVariables.set("VARNAME", "varvalue"); - String resolved = EnvVarResolverProperties.resolveEnvVars("padding ${VARNAME} padding"); - Assert.assertEquals("padding varvalue padding", resolved); + public void resolveEnvVar() throws Exception { + 
SystemLambda.withEnvironmentVariable("VARNAME", "varvalue").execute(() -> { + String resolved = EnvVarResolverProperties.resolveEnvVars("padding ${VARNAME} padding"); + assertEquals(resolved, "padding varvalue padding"); + }); } @Test - public void resolveEnvVars() { - environmentVariables.set("VARNAME1", "varvalue1"); - environmentVariables.set("VARNAME2", "varvalue2"); - String resolved = EnvVarResolverProperties - .resolveEnvVars("padding ${VARNAME1} ${VARNAME2} padding"); - Assert.assertEquals("padding varvalue1 varvalue2 padding", resolved); + public void resolveEnvVars() throws Exception { + SystemLambda.withEnvironmentVariable("VARNAME1", "varvalue1") + .and("VARNAME2", "varvalue2") + .execute(() -> { + String resolved = EnvVarResolverProperties.resolveEnvVars( + "padding ${VARNAME1} ${VARNAME2} padding"); + assertEquals(resolved, "padding varvalue1 varvalue2 padding"); + }); } @Test - public void getProperty() { - String NC_PORT = "6667"; - environmentVariables.set("NC_PORT", NC_PORT); - System.setProperty("propertiesImplementation", - "org.apache.pulsar.io.flume.node.EnvVarResolverProperties"); - - Assert.assertEquals(NC_PORT, provider.getFlumeConfiguration() - .getConfigurationFor("a1") - .getSourceContext().get("r1").getParameters().get("port")); + public void getProperty() throws Exception { + SystemLambda.withEnvironmentVariable("NC_PORT", "6667").execute(() -> { + System.setProperty("propertiesImplementation", + "org.apache.pulsar.io.flume.node.EnvVarResolverProperties"); + assertEquals(provider.getFlumeConfiguration() + .getConfigurationFor("a1") + .getSourceContext().get("r1").getParameters().get("port"), "6667"); + }); } } diff --git a/pulsar-io/hbase/pom.xml b/pulsar-io/hbase/pom.xml index 0b25207840e19..4d782fb15fe03 100644 --- a/pulsar-io/hbase/pom.xml +++ b/pulsar-io/hbase/pom.xml @@ -25,7 +25,7 @@ pulsar-io org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-hbase Pulsar IO :: Hbase @@ -68,6 +68,16 @@ org.apache.hbase hbase-client 
${hbase.version} + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + + diff --git a/pulsar-io/hdfs2/pom.xml b/pulsar-io/hdfs2/pom.xml index 36358c4d5fc53..5f038bf810e1a 100644 --- a/pulsar-io/hdfs2/pom.xml +++ b/pulsar-io/hdfs2/pom.xml @@ -23,7 +23,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-hdfs2 Pulsar IO :: Hdfs2 @@ -49,6 +49,16 @@ org.apache.hadoop hadoop-client 2.8.5 + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + + org.apache.commons diff --git a/pulsar-io/hdfs3/pom.xml b/pulsar-io/hdfs3/pom.xml index fe12a54a75b43..be94f18de041c 100644 --- a/pulsar-io/hdfs3/pom.xml +++ b/pulsar-io/hdfs3/pom.xml @@ -23,7 +23,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-hdfs3 Pulsar IO :: Hdfs3 @@ -54,6 +54,14 @@ jakarta.activation jakarta.activation-api + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + diff --git a/pulsar-io/influxdb/pom.xml b/pulsar-io/influxdb/pom.xml index 88ef951ded242..6ffdce617bf23 100644 --- a/pulsar-io/influxdb/pom.xml +++ b/pulsar-io/influxdb/pom.xml @@ -25,7 +25,7 @@ pulsar-io org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-influxdb @@ -51,13 +51,13 @@ com.influxdb influxdb-client-java - 1.6.0 + 4.0.0 org.influxdb influxdb-java - 2.7 + 2.22 com.squareup.okhttp3 diff --git a/pulsar-io/jdbc/clickhouse/pom.xml b/pulsar-io/jdbc/clickhouse/pom.xml index d09feea5499b0..73460a1b6b742 100644 --- a/pulsar-io/jdbc/clickhouse/pom.xml +++ b/pulsar-io/jdbc/clickhouse/pom.xml @@ -24,7 +24,7 @@ pulsar-io-jdbc org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 diff --git a/pulsar-io/jdbc/core/pom.xml b/pulsar-io/jdbc/core/pom.xml index 32a22f43f5a4a..6fa7af0f36d3e 100644 --- a/pulsar-io/jdbc/core/pom.xml +++ b/pulsar-io/jdbc/core/pom.xml @@ -24,7 +24,7 @@ pulsar-io-jdbc org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 diff --git a/pulsar-io/jdbc/mariadb/pom.xml b/pulsar-io/jdbc/mariadb/pom.xml index f1ba934af160e..ec9a9ac964e02 100644 --- a/pulsar-io/jdbc/mariadb/pom.xml +++ 
b/pulsar-io/jdbc/mariadb/pom.xml @@ -24,7 +24,7 @@ pulsar-io-jdbc org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 diff --git a/pulsar-io/jdbc/pom.xml b/pulsar-io/jdbc/pom.xml index 7935748eeef7d..59a15fca7d39a 100644 --- a/pulsar-io/jdbc/pom.xml +++ b/pulsar-io/jdbc/pom.xml @@ -32,7 +32,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-jdbc diff --git a/pulsar-io/jdbc/postgres/pom.xml b/pulsar-io/jdbc/postgres/pom.xml index 2e76f431571c1..c8c6918b94547 100644 --- a/pulsar-io/jdbc/postgres/pom.xml +++ b/pulsar-io/jdbc/postgres/pom.xml @@ -24,7 +24,7 @@ pulsar-io-jdbc org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 diff --git a/pulsar-io/jdbc/sqlite/pom.xml b/pulsar-io/jdbc/sqlite/pom.xml index 741ecc31e7817..ec215407eb340 100644 --- a/pulsar-io/jdbc/sqlite/pom.xml +++ b/pulsar-io/jdbc/sqlite/pom.xml @@ -24,7 +24,7 @@ pulsar-io-jdbc org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 pulsar-io-jdbc-sqlite diff --git a/pulsar-io/kafka-connect-adaptor-nar/pom.xml b/pulsar-io/kafka-connect-adaptor-nar/pom.xml index bf0a5fc22bb4d..f68e9bcea2e45 100644 --- a/pulsar-io/kafka-connect-adaptor-nar/pom.xml +++ b/pulsar-io/kafka-connect-adaptor-nar/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-kafka-connect-adaptor-nar diff --git a/pulsar-io/kafka-connect-adaptor/pom.xml b/pulsar-io/kafka-connect-adaptor/pom.xml index 5f07d24a304e4..6cf8d7a155b3f 100644 --- a/pulsar-io/kafka-connect-adaptor/pom.xml +++ b/pulsar-io/kafka-connect-adaptor/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-kafka-connect-adaptor @@ -38,6 +38,12 @@ ${project.version} + + ${project.groupId} + pulsar-io-common + ${project.version} + + org.apache.kafka kafka_${scala.binary.version} @@ -48,6 +54,12 @@ org.apache.kafka connect-runtime ${kafka-client.version} + + + org.apache.kafka + kafka-log4j-appender + + diff --git 
a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/AbstractKafkaConnectSource.java b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/AbstractKafkaConnectSource.java index 3901c5f3d176b..4612633677b1a 100644 --- a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/AbstractKafkaConnectSource.java +++ b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/AbstractKafkaConnectSource.java @@ -27,8 +27,6 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import lombok.Getter; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.connect.runtime.TaskConfig; @@ -168,6 +166,7 @@ public synchronized Record read() throws Exception { } catch (ExecutionException ex) { // log the error, continue execution log.error("execution exception while get flushFuture", ex); + throw new Exception("Flush failed", ex.getCause()); } finally { flushFuture = null; currentBatch = null; @@ -180,6 +179,12 @@ public synchronized Record read() throws Exception { public void close() { if (sourceTask != null) { sourceTask.stop(); + sourceTask = null; + } + + if (offsetStore != null) { + offsetStore.stop(); + offsetStore = null; } } @@ -187,7 +192,6 @@ public void close() { private static Map PROPERTIES = Collections.emptyMap(); private static Optional RECORD_SEQUENCE = Optional.empty(); - private static long FLUSH_TIMEOUT_MS = 60000; public abstract class AbstractKafkaSourceRecord implements Record { @Getter @@ -248,8 +252,15 @@ private void completedFlushOffset(Throwable error, Void result) { flushFuture.complete(null); } catch (InterruptedException exception) { log.warn("Flush of {} offsets interrupted, cancelling", this); + Thread.currentThread().interrupt(); + offsetWriter.cancelFlush(); + 
flushFuture.completeExceptionally(new Exception("Failed to commit offsets", exception)); + } catch (Throwable t) { + // SourceTask can throw unchecked ConnectException/KafkaException. + // Make sure the future is cancelled in that case + log.warn("Flush of {} offsets failed, cancelling", this); offsetWriter.cancelFlush(); - flushFuture.completeExceptionally(new Exception("Failed to commit offsets")); + flushFuture.completeExceptionally(new Exception("Failed to commit offsets", t)); } } } diff --git a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSink.java b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSink.java index 268105ce8187a..e8165f8545985 100644 --- a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSink.java +++ b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSink.java @@ -24,6 +24,18 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Properties; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; @@ -44,17 +56,6 @@ import org.apache.pulsar.io.kafka.connect.schema.KafkaConnectData; import org.apache.pulsar.io.kafka.connect.schema.PulsarSchemaToKafkaSchema; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Properties; 
-import java.util.concurrent.ConcurrentLinkedDeque; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; - import static org.apache.pulsar.io.kafka.connect.PulsarKafkaWorkerConfig.OFFSET_STORAGE_TOPIC_CONFIG; @Slf4j @@ -154,6 +155,11 @@ public void open(Map config, SinkContext ctx) throws Exception { Preconditions.checkNotNull(configs); Preconditions.checkArgument(configs.size() == 1); + // configs may contain immutable/unmodifiable maps + configs = configs.stream() + .map(HashMap::new) + .collect(Collectors.toList()); + configs.forEach(x -> { x.put(OFFSET_STORAGE_TOPIC_CONFIG, kafkaSinkConfig.getOffsetStorageTopic()); }); diff --git a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSource.java b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSource.java index f84aa64cc46c3..5d30e95acefc9 100644 --- a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSource.java +++ b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSource.java @@ -69,15 +69,12 @@ public synchronized KafkaSourceRecord processSourceRecord(final SourceRecord src return record; } - private static Map PROPERTIES = Collections.emptyMap(); - private static Optional RECORD_SEQUENCE = Optional.empty(); - private static long FLUSH_TIMEOUT_MS = 2000; + private static final AvroData avroData = new AvroData(1000); private class KafkaSourceRecord extends AbstractKafkaSourceRecord> implements KVRecord { KafkaSourceRecord(SourceRecord srcRecord) { super(srcRecord); - AvroData avroData = new AvroData(1000); byte[] keyBytes = keyConverter.fromConnectData( srcRecord.topic(), srcRecord.keySchema(), srcRecord.key()); this.key = keyBytes != null ? 
Optional.of(Base64.getEncoder().encodeToString(keyBytes)) : Optional.empty(); diff --git a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/PulsarKafkaWorkerConfig.java b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/PulsarKafkaWorkerConfig.java index a6cc72517fe66..bf66b6fc98ae4 100644 --- a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/PulsarKafkaWorkerConfig.java +++ b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/PulsarKafkaWorkerConfig.java @@ -43,6 +43,14 @@ public class PulsarKafkaWorkerConfig extends WorkerConfig { public static final String TOPIC_NAMESPACE_CONFIG = "topic.namespace"; private static final String TOPIC_NAMESPACE_CONFIG_DOC = "namespace of topic name to store the output topics"; + /** + * offset.storage.reader.config. + */ + public static final String OFFSET_STORAGE_READER_CONFIG = "offset.storage.reader.config"; + private static final String OFFSET_STORAGE_READER_CONFIG_DOC = "The configs of the reader for the " + + "kafka connector offsets topic, in the form of a JSON string with key-value pairs"; + + static { CONFIG = new ConfigDef() .define(OFFSET_STORAGE_TOPIC_CONFIG, @@ -53,7 +61,12 @@ public class PulsarKafkaWorkerConfig extends WorkerConfig { Type.STRING, "public/default", Importance.HIGH, - TOPIC_NAMESPACE_CONFIG_DOC); + TOPIC_NAMESPACE_CONFIG_DOC) + .define(OFFSET_STORAGE_READER_CONFIG, + Type.STRING, + null, + Importance.HIGH, + OFFSET_STORAGE_READER_CONFIG_DOC); } public PulsarKafkaWorkerConfig(Map props) { diff --git a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/PulsarOffsetBackingStore.java b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/PulsarOffsetBackingStore.java index b1338f837bb49..d2f5aeef72a79 100644 --- a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/PulsarOffsetBackingStore.java 
+++ b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/PulsarOffsetBackingStore.java @@ -21,6 +21,8 @@ import static com.google.common.base.Preconditions.checkArgument; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.commons.lang.StringUtils.isBlank; +import static org.apache.pulsar.io.common.IOConfigUtils.loadConfigFromJsonString; +import com.fasterxml.jackson.core.JsonProcessingException; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; @@ -30,6 +32,8 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import lombok.extern.slf4j.Slf4j; import org.apache.kafka.connect.runtime.WorkerConfig; @@ -49,9 +53,10 @@ @Slf4j public class PulsarOffsetBackingStore implements OffsetBackingStore { - private Map data; + private final Map data = new ConcurrentHashMap<>(); private PulsarClient client; private String topic; + private Map readerConfigMap = new HashMap<>(); private Producer producer; private Reader reader; private volatile CompletableFuture outstandingReadToEnd = null; @@ -65,7 +70,13 @@ public PulsarOffsetBackingStore(PulsarClient client) { public void configure(WorkerConfig workerConfig) { this.topic = workerConfig.getString(PulsarKafkaWorkerConfig.OFFSET_STORAGE_TOPIC_CONFIG); checkArgument(!isBlank(topic), "Offset storage topic must be specified"); - this.data = new HashMap<>(); + try { + this.readerConfigMap = loadConfigFromJsonString( + workerConfig.getString(PulsarKafkaWorkerConfig.OFFSET_STORAGE_READER_CONFIG)); + } catch (JsonProcessingException exception) { + log.warn("The provided reader configs are invalid, " + + "will not passing any extra config to the reader builder.", exception); + } log.info("Configure offset backing store on pulsar topic {}", topic); } @@ 
-126,10 +137,13 @@ private void readNext(CompletableFuture endFuture) { } void processMessage(Message message) { - synchronized (data) { + if (message.getKey() != null) { data.put( ByteBuffer.wrap(message.getKey().getBytes(UTF_8)), ByteBuffer.wrap(message.getValue())); + } else { + log.debug("Got message without key from the offset storage topic, skip it. message value: {}", + message.getValue()); } } @@ -144,26 +158,37 @@ public void start() { reader = client.newReader(Schema.BYTES) .topic(topic) .startMessageId(MessageId.earliest) + .loadConf(readerConfigMap) .create(); log.info("Successfully created reader to replay updates from topic {}", topic); CompletableFuture endFuture = new CompletableFuture<>(); readToEnd(endFuture); - endFuture.join(); + endFuture.get(); } catch (PulsarClientException e) { log.error("Failed to setup pulsar producer/reader to cluster", e); throw new RuntimeException("Failed to setup pulsar producer/reader to cluster ", e); + } catch (ExecutionException | InterruptedException e) { + log.error("Failed to start PulsarOffsetBackingStore", e); + throw new RuntimeException("Failed to start PulsarOffsetBackingStore", e); } } @Override public void stop() { + log.info("Stopping PulsarOffsetBackingStore"); if (null != producer) { + try { + producer.flush(); + } catch (PulsarClientException pce) { + log.warn("Failed to flush the producer", pce); + } try { producer.close(); } catch (PulsarClientException e) { log.warn("Failed to close producer", e); } + producer = null; } if (null != reader) { try { @@ -171,7 +196,11 @@ public void stop() { } catch (IOException e) { log.warn("Failed to close reader", e); } + reader = null; } + data.clear(); + + // do not close the client, it is provided by the sink context } @Override @@ -181,10 +210,7 @@ public Future> get(Collection keys) { return endFuture.thenApply(ignored -> { Map values = new HashMap<>(); for (ByteBuffer key : keys) { - ByteBuffer value; - synchronized (data) { - value = data.get(key); - } + 
ByteBuffer value = data.get(key); if (null != value) { values.put(key, value); } diff --git a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/schema/KafkaSchemaWrappedSchema.java b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/schema/KafkaSchemaWrappedSchema.java index ba57692edf17d..2db9d6cd93bc6 100644 --- a/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/schema/KafkaSchemaWrappedSchema.java +++ b/pulsar-io/kafka-connect-adaptor/src/main/java/org/apache/pulsar/io/kafka/connect/schema/KafkaSchemaWrappedSchema.java @@ -27,7 +27,6 @@ import org.apache.kafka.connect.json.JsonConverter; import org.apache.kafka.connect.storage.Converter; import org.apache.pulsar.client.api.Schema; -import org.apache.pulsar.client.impl.schema.SchemaInfoImpl; import org.apache.pulsar.client.impl.schema.generic.GenericAvroSchema; import org.apache.pulsar.common.schema.SchemaInfo; import org.apache.pulsar.common.schema.SchemaType; @@ -45,7 +44,7 @@ public KafkaSchemaWrappedSchema(org.apache.pulsar.kafka.shade.avro.Schema schema Map props = new HashMap<>(); boolean isJsonConverter = converter instanceof JsonConverter; props.put(GenericAvroSchema.OFFSET_PROP, isJsonConverter ? "0" : "5"); - this.schemaInfo = SchemaInfoImpl.builder() + this.schemaInfo = SchemaInfo.builder() .name(isJsonConverter? "KafKaJson" : "KafkaAvro") .type(isJsonConverter ? 
SchemaType.JSON : SchemaType.AVRO) .schema(schema.toString().getBytes(UTF_8)) diff --git a/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/ErrFileStreamSourceTask.java b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/ErrFileStreamSourceTask.java new file mode 100644 index 0000000000000..d17f32cfc47a0 --- /dev/null +++ b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/ErrFileStreamSourceTask.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.pulsar.io.kafka.connect; + +import org.apache.kafka.connect.file.FileStreamSourceTask; + +public class ErrFileStreamSourceTask extends FileStreamSourceTask { + + @Override + public void commit() throws InterruptedException { + throw new org.apache.kafka.connect.errors.ConnectException("blah"); + } + +} diff --git a/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSourceErrTest.java b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSourceErrTest.java new file mode 100644 index 0000000000000..cc04706f3ee45 --- /dev/null +++ b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/KafkaConnectSourceErrTest.java @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.io.kafka.connect; + +import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.connect.file.FileStreamSourceConnector; +import org.apache.kafka.connect.runtime.TaskConfig; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.common.schema.KeyValue; +import org.apache.pulsar.functions.api.Record; +import org.apache.pulsar.io.core.SourceContext; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.io.File; +import java.io.OutputStream; +import java.nio.file.Files; +import java.util.HashMap; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + +/** + * Test the implementation of {@link KafkaConnectSource}. 
+ */ +@Slf4j +public class KafkaConnectSourceErrTest extends ProducerConsumerBase { + + private Map config = new HashMap<>(); + private String offsetTopicName; + // The topic to publish data to, for kafkaSource + private String topicName; + private KafkaConnectSource kafkaConnectSource; + private File tempFile; + private SourceContext context; + private PulsarClient client; + + @BeforeMethod + @Override + protected void setup() throws Exception { + super.internalSetup(); + super.producerBaseSetup(); + + config.put(TaskConfig.TASK_CLASS_CONFIG, "org.apache.pulsar.io.kafka.connect.ErrFileStreamSourceTask"); + config.put(PulsarKafkaWorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.storage.StringConverter"); + config.put(PulsarKafkaWorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.storage.StringConverter"); + + this.offsetTopicName = "persistent://my-property/my-ns/kafka-connect-source-offset"; + config.put(PulsarKafkaWorkerConfig.OFFSET_STORAGE_TOPIC_CONFIG, offsetTopicName); + + this.topicName = "persistent://my-property/my-ns/kafka-connect-source"; + config.put(FileStreamSourceConnector.TOPIC_CONFIG, topicName); + tempFile = File.createTempFile("some-file-name", null); + config.put(FileStreamSourceConnector.FILE_CONFIG, tempFile.getAbsoluteFile().toString()); + config.put(FileStreamSourceConnector.TASK_BATCH_SIZE_CONFIG, String.valueOf(FileStreamSourceConnector.DEFAULT_TASK_BATCH_SIZE)); + + this.context = mock(SourceContext.class); + this.client = PulsarClient.builder() + .serviceUrl(brokerUrl.toString()) + .build(); + when(context.getPulsarClient()).thenReturn(this.client); + } + + @AfterMethod(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + if (this.client != null) { + this.client.close(); + } + tempFile.delete(); + super.internalCleanup(); + } + + @Test + public void testOpenAndRead() throws Exception { + kafkaConnectSource = new KafkaConnectSource(); + kafkaConnectSource.open(config, context); + 
+ // use FileStreamSourceConnector, each line is a record, need "\n" and end of each record. + OutputStream os = Files.newOutputStream(tempFile.toPath()); + + String line1 = "This is the first line\n"; + os.write(line1.getBytes()); + os.flush(); + log.info("write 2 lines."); + + String line2 = "This is the second line\n"; + os.write(line2.getBytes()); + os.flush(); + + log.info("finish write, will read 2 lines"); + + // Note: FileStreamSourceTask read the whole line as Value, and set Key as null. + Record> record = kafkaConnectSource.read(); + String readBack1 = new String(record.getValue().getValue()); + assertTrue(line1.contains(readBack1)); + assertNull(record.getValue().getKey()); + log.info("read line1: {}", readBack1); + record.ack(); + + record = kafkaConnectSource.read(); + String readBack2 = new String(record.getValue().getValue()); + assertTrue(line2.contains(readBack2)); + assertNull(record.getValue().getKey()); + assertTrue(record.getPartitionId().isPresent()); + assertFalse(record.getPartitionIndex().isPresent()); + log.info("read line2: {}", readBack2); + record.ack(); + + String line3 = "This is the 3rd line\n"; + os.write(line3.getBytes()); + os.flush(); + + try { + kafkaConnectSource.read(); + fail("expected exception"); + } catch (Exception e) { + log.info("got exception", e); + assertTrue(e.getCause().getCause() instanceof org.apache.kafka.connect.errors.ConnectException); + } + } +} diff --git a/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/PulsarOffsetBackingStoreTest.java b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/PulsarOffsetBackingStoreTest.java index bb2eced011f52..7b71ba1a13f98 100644 --- a/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/PulsarOffsetBackingStoreTest.java +++ b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/PulsarOffsetBackingStoreTest.java @@ -64,13 +64,10 @@ protected void setup() 
throws Exception { this.topicName = "persistent://my-property/my-ns/offset-topic"; this.defaultProps.put(PulsarKafkaWorkerConfig.OFFSET_STORAGE_TOPIC_CONFIG, topicName); - this.distributedConfig = new PulsarKafkaWorkerConfig(this.defaultProps); this.client = PulsarClient.builder() .serviceUrl(brokerUrl.toString()) .build(); this.offsetBackingStore = new PulsarOffsetBackingStore(client); - this.offsetBackingStore.configure(distributedConfig); - this.offsetBackingStore.start(); } @AfterMethod(alwaysRun = true) @@ -84,8 +81,19 @@ protected void cleanup() throws Exception { super.internalCleanup(); } + private void testOffsetBackingStore(boolean testWithReaderConfig) throws Exception { + if (testWithReaderConfig) { + this.defaultProps.put(PulsarKafkaWorkerConfig.OFFSET_STORAGE_READER_CONFIG, + "{\"subscriptionName\":\"my-subscription\"}"); + } + this.distributedConfig = new PulsarKafkaWorkerConfig(this.defaultProps); + this.offsetBackingStore.configure(distributedConfig); + this.offsetBackingStore.start(); + } + @Test public void testGetFromEmpty() throws Exception { + testOffsetBackingStore(false); assertTrue(offsetBackingStore.get( Arrays.asList(ByteBuffer.wrap("empty-key".getBytes(UTF_8))) ).get().isEmpty()); @@ -93,11 +101,13 @@ public void testGetFromEmpty() throws Exception { @Test public void testGetSet() throws Exception { + testOffsetBackingStore(false); testGetSet(false); } @Test public void testGetSetCallback() throws Exception { + testOffsetBackingStore(false); testGetSet(true); } @@ -139,4 +149,12 @@ private void testGetSet(boolean testCallback) throws Exception { assertEquals(new String(valData, UTF_8), "test-val-" + idx); }); } + + @Test + public void testWithReaderConfig() throws Exception { + testOffsetBackingStore(true); + testGetSet(false); + List subscriptions = admin.topics().getSubscriptions(topicName); + assertTrue(subscriptions.contains("my-subscription")); + } } diff --git 
a/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/SchemaedFileStreamSinkConnector.java b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/SchemaedFileStreamSinkConnector.java index a3cce924d1ac5..4a786617f754b 100644 --- a/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/SchemaedFileStreamSinkConnector.java +++ b/pulsar-io/kafka-connect-adaptor/src/test/java/org/apache/pulsar/io/kafka/connect/SchemaedFileStreamSinkConnector.java @@ -22,6 +22,11 @@ import org.apache.kafka.connect.connector.Task; import org.apache.kafka.connect.file.FileStreamSinkConnector; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + /** * A FileStreamSinkConnector for testing that writes data other than just a value, i.e.: * key, value, key and value schemas. @@ -31,4 +36,13 @@ public class SchemaedFileStreamSinkConnector extends FileStreamSinkConnector { public Class taskClass() { return SchemaedFileStreamSinkTask.class; } + + @Override + public List> taskConfigs(int maxTasks) { + // to test cases when task return immutable maps as configs + return super.taskConfigs(maxTasks) + .stream() + .map(Collections::unmodifiableMap) + .collect(Collectors.toList()); + } } diff --git a/pulsar-io/kafka/pom.xml b/pulsar-io/kafka/pom.xml index 1e6a3869fac31..6d6c61fb55828 100644 --- a/pulsar-io/kafka/pom.xml +++ b/pulsar-io/kafka/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-kafka @@ -70,6 +70,16 @@ io.confluent kafka-schema-registry ${kafka.confluent.schemaregistryclient.version} + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + + diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/AvroSchemaCache.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/AvroSchemaCache.java index 2a9e1c44af456..eda8c96adc84a 100644 --- 
a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/AvroSchemaCache.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/AvroSchemaCache.java @@ -25,7 +25,6 @@ import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException; import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.client.api.Schema; -import org.apache.pulsar.client.impl.schema.SchemaInfoImpl; import org.apache.pulsar.common.schema.SchemaInfo; import org.apache.pulsar.common.schema.SchemaType; @@ -66,7 +65,7 @@ private Schema fetchSchema(int schemaId) { org.apache.avro.Schema schema = schemaRegistryClient.getById(schemaId); String definition = schema.toString(false); log.info("Schema {} definition {}", schemaId, definition); - SchemaInfo schemaInfo = SchemaInfoImpl.builder() + SchemaInfo schemaInfo = SchemaInfo.builder() .type(SchemaType.AVRO) .name(schema.getName()) .properties(Collections.emptyMap()) diff --git a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java index 3fa687eceb638..332a080cc05af 100644 --- a/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java +++ b/pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/KafkaSourceConfig.java @@ -19,6 +19,7 @@ package org.apache.pulsar.io.kafka; +import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; import lombok.Data; @@ -112,6 +113,7 @@ public static KafkaSourceConfig load(String yamlFile) throws IOException { public static KafkaSourceConfig load(Map map) throws IOException { ObjectMapper mapper = new ObjectMapper(); + mapper.enable(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT); return mapper.readValue(new ObjectMapper().writeValueAsString(map), KafkaSourceConfig.class); } } \ No newline at end of file diff --git 
a/pulsar-io/kafka/src/test/java/org/apache/pulsar/io/kafka/source/KafkaAbstractSourceTest.java b/pulsar-io/kafka/src/test/java/org/apache/pulsar/io/kafka/source/KafkaAbstractSourceTest.java index 1b676e2cce5b5..4bcf6ed89050e 100644 --- a/pulsar-io/kafka/src/test/java/org/apache/pulsar/io/kafka/source/KafkaAbstractSourceTest.java +++ b/pulsar-io/kafka/src/test/java/org/apache/pulsar/io/kafka/source/KafkaAbstractSourceTest.java @@ -20,6 +20,7 @@ package org.apache.pulsar.io.kafka.source; +import com.google.common.collect.ImmutableMap; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.pulsar.client.api.ConsumerBuilder; @@ -45,6 +46,7 @@ import static org.mockito.Mockito.mock; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertNull; import static org.testng.Assert.expectThrows; import static org.testng.Assert.fail; @@ -104,6 +106,23 @@ public void testInvalidConfigWillThrownException() throws Exception { source.close(); } + @Test + public void loadConsumerConfigPropertiesFromMapTest() throws Exception { + Map config = new HashMap<>(); + config.put("consumerConfigProperties", ""); + KafkaSourceConfig kafkaSourceConfig = KafkaSourceConfig.load(config); + assertNotNull(kafkaSourceConfig); + assertNull(kafkaSourceConfig.getConsumerConfigProperties()); + + config.put("consumerConfigProperties", null); + kafkaSourceConfig = KafkaSourceConfig.load(config); + assertNull(kafkaSourceConfig.getConsumerConfigProperties()); + + config.put("consumerConfigProperties", ImmutableMap.of("foo", "bar")); + kafkaSourceConfig = KafkaSourceConfig.load(config); + assertEquals(kafkaSourceConfig.getConsumerConfigProperties(), ImmutableMap.of("foo", "bar")); + } + @Test public final void loadFromYamlFileTest() throws IOException { File yamlFile = getFile("kafkaSourceConfig.yaml"); diff --git a/pulsar-io/kinesis/pom.xml 
b/pulsar-io/kinesis/pom.xml index 30b57475cbee1..c2d4048c6ede2 100644 --- a/pulsar-io/kinesis/pom.xml +++ b/pulsar-io/kinesis/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-kinesis diff --git a/pulsar-io/mongo/pom.xml b/pulsar-io/mongo/pom.xml index 93f2c1443f43c..41e3225a8b383 100644 --- a/pulsar-io/mongo/pom.xml +++ b/pulsar-io/mongo/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-mongo diff --git a/pulsar-io/netty/pom.xml b/pulsar-io/netty/pom.xml index 3a41643d65cba..93032fedd87d8 100644 --- a/pulsar-io/netty/pom.xml +++ b/pulsar-io/netty/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-netty diff --git a/pulsar-io/nsq/pom.xml b/pulsar-io/nsq/pom.xml index ee4dd4150ac52..b97c03d9d7307 100644 --- a/pulsar-io/nsq/pom.xml +++ b/pulsar-io/nsq/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-nsq diff --git a/pulsar-io/pom.xml b/pulsar-io/pom.xml index 2e20f7934edaa..b9cca005d32ce 100644 --- a/pulsar-io/pom.xml +++ b/pulsar-io/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io diff --git a/pulsar-io/rabbitmq/pom.xml b/pulsar-io/rabbitmq/pom.xml index cef4b1c4a48d0..5b1a51ce8e648 100644 --- a/pulsar-io/rabbitmq/pom.xml +++ b/pulsar-io/rabbitmq/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-rabbitmq diff --git a/pulsar-io/redis/pom.xml b/pulsar-io/redis/pom.xml index 30da872435bb9..c258706560840 100644 --- a/pulsar-io/redis/pom.xml +++ b/pulsar-io/redis/pom.xml @@ -25,7 +25,7 @@ pulsar-io org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-redis diff --git a/pulsar-io/solr/pom.xml b/pulsar-io/solr/pom.xml index 6b0c02465533e..386f7b720ca93 100644 --- a/pulsar-io/solr/pom.xml +++ b/pulsar-io/solr/pom.xml @@ -25,11 +25,11 @@ pulsar-io org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 - 8.6.3 + 8.11.1 pulsar-io-solr diff --git 
a/pulsar-io/twitter/pom.xml b/pulsar-io/twitter/pom.xml index f32034ec829f7..0a6bef8ff46f6 100644 --- a/pulsar-io/twitter/pom.xml +++ b/pulsar-io/twitter/pom.xml @@ -24,7 +24,7 @@ org.apache.pulsar pulsar-io - 2.9.0-SNAPSHOT + 2.9.3 pulsar-io-twitter diff --git a/pulsar-metadata/pom.xml b/pulsar-metadata/pom.xml index 59052d71ddba6..2072464a51f63 100644 --- a/pulsar-metadata/pom.xml +++ b/pulsar-metadata/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. @@ -77,6 +77,17 @@ + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + com.github.spotbugs spotbugs-maven-plugin diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/api/MetadataCache.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/api/MetadataCache.java index 360c092b6f1b4..1272130eb76f2 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/api/MetadataCache.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/api/MetadataCache.java @@ -148,4 +148,11 @@ public interface MetadataCache { * @param path the path of the object in the metadata store */ void invalidate(String path); + + /** + * Invalidate and reload an object in the metadata cache. 
+ * + * @param path the path of the object in the metadata store + */ + void refresh(String path); } diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/cache/impl/MetadataCacheImpl.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/cache/impl/MetadataCacheImpl.java index 16b419fe9ed91..52b1272efddea 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/cache/impl/MetadataCacheImpl.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/cache/impl/MetadataCacheImpl.java @@ -168,8 +168,7 @@ public CompletableFuture readModifyUpdateOrCreate(String path, Function { - objCache.synchronous().invalidate(path); - objCache.synchronous().refresh(path); + refresh(path); }).thenApply(__ -> newValueObj); }), path); } @@ -198,8 +197,7 @@ public CompletableFuture readModifyUpdate(String path, Function modifyF } return store.put(path, newValue, Optional.of(expectedVersion)).thenAccept(__ -> { - objCache.synchronous().invalidate(path); - objCache.synchronous().refresh(path); + refresh(path); }).thenApply(__ -> newValueObj); }), path); } @@ -220,7 +218,7 @@ public CompletableFuture create(String path, T value) { // In addition to caching the value, we need to add a watch on the path, // so when/if it changes on any other node, we are notified and we can // update the cache - objCache.get(path).whenComplete( (stat2, ex) -> { + objCache.get(path).whenComplete((stat2, ex) -> { if (ex == null) { future.complete(null); } else { @@ -261,6 +259,12 @@ public void invalidate(String path) { objCache.synchronous().invalidate(path); } + @Override + public void refresh(String path) { + // Refresh object of path if only it is cached before. 
+ objCache.asMap().computeIfPresent(path, (oldKey, oldValue) -> readValueFromStore(path)); + } + @VisibleForTesting public void invalidateAll() { objCache.synchronous().invalidateAll(); @@ -272,12 +276,7 @@ public void accept(Notification t) { switch (t.getType()) { case Created: case Modified: - if (objCache.synchronous().getIfPresent(path) != null) { - // Trigger background refresh of the cached item, but before make sure - // to invalidate the entry so that we won't serve a stale cached version - objCache.synchronous().invalidate(path); - objCache.synchronous().refresh(path); - } + refresh(path); break; case Deleted: @@ -291,12 +290,12 @@ public void accept(Notification t) { private CompletableFuture executeWithRetry(Supplier> op, String key) { CompletableFuture result = new CompletableFuture<>(); - op.get().thenAccept(r -> result.complete(r)).exceptionally((ex) -> { + op.get().thenAccept(result::complete).exceptionally((ex) -> { if (ex.getCause() instanceof BadVersionException) { // if resource is updated by other than metadata-cache then metadata-cache will get bad-version // exception. so, try to invalidate the cache and try one more time. 
objCache.synchronous().invalidate(key); - op.get().thenAccept((c) -> result.complete(null)).exceptionally((ex1) -> { + op.get().thenAccept(result::complete).exceptionally((ex1) -> { result.completeExceptionally(ex1.getCause()); return null; }); diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/coordination/impl/LeaderElectionImpl.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/coordination/impl/LeaderElectionImpl.java index 29cb4f97fe0e3..6599c625f788b 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/coordination/impl/LeaderElectionImpl.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/coordination/impl/LeaderElectionImpl.java @@ -111,7 +111,10 @@ private synchronized CompletableFuture elect() { } else { return tryToBecomeLeader(); } - }); + }).thenCompose(leaderElectionState -> + // make sure that the cache contains the current leader + // so that getLeaderValueIfPresent works on all brokers + cache.get(path).thenApply(__ -> leaderElectionState)); } private synchronized CompletableFuture handleExistingLeaderValue(GetResult res) { diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/coordination/impl/LockManagerImpl.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/coordination/impl/LockManagerImpl.java index f45c0c7dd7d17..046b174d35218 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/coordination/impl/LockManagerImpl.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/coordination/impl/LockManagerImpl.java @@ -125,7 +125,7 @@ private void handleSessionEvent(SessionEvent se) { if (se == SessionEvent.SessionReestablished) { log.info("Metadata store session has been re-established. 
Revalidating all the existing locks."); for (ResourceLockImpl lock : locks.values()) { - futures.add(lock.revalidate(lock.getValue())); + futures.add(lock.revalidate(lock.getValue(), true)); } } else if (se == SessionEvent.Reconnected) { diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/coordination/impl/ResourceLockImpl.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/coordination/impl/ResourceLockImpl.java index 677ace7218ede..05758ef114648 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/coordination/impl/ResourceLockImpl.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/coordination/impl/ResourceLockImpl.java @@ -24,6 +24,7 @@ import lombok.extern.slf4j.Slf4j; import org.apache.bookkeeper.common.concurrent.FutureUtils; +import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.metadata.api.GetResult; import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.metadata.api.MetadataStoreException.BadVersionException; @@ -44,6 +45,7 @@ public class ResourceLockImpl implements ResourceLock { private long version; private final CompletableFuture expiredFuture; private boolean revalidateAfterReconnection = false; + private CompletableFuture revalidateFuture; private enum State { Init, @@ -127,7 +129,7 @@ synchronized CompletableFuture acquire(T newValue) { .thenRun(() -> result.complete(null)) .exceptionally(ex -> { if (ex.getCause() instanceof LockBusyException) { - revalidate(newValue) + revalidate(newValue, false) .thenAccept(__ -> result.complete(null)) .exceptionally(ex1 -> { result.completeExceptionally(ex1); @@ -144,6 +146,9 @@ synchronized CompletableFuture acquire(T newValue) { // Simple operation of acquiring the lock with no retries, or checking for the lock content private CompletableFuture acquireWithNoRevalidation(T newValue) { + if (log.isDebugEnabled()) { + log.debug("acquireWithNoRevalidation,newValue={},version={}", newValue, 
version); + } byte[] payload; try { payload = serde.serialize(path, newValue); @@ -181,37 +186,65 @@ synchronized void lockWasInvalidated() { } log.info("Lock on resource {} was invalidated", path); - revalidate(value) - .thenRun(() -> log.info("Successfully revalidated the lock on {}", path)) - .exceptionally(ex -> { - synchronized (ResourceLockImpl.this) { - if (ex.getCause() instanceof BadVersionException) { - log.warn("Failed to revalidate the lock at {}. Marked as expired", path); - state = State.Released; - expiredFuture.complete(null); - } else { - // We failed to revalidate the lock due to connectivity issue - // Continue assuming we hold the lock, until we can revalidate it, either - // on Reconnected or SessionReestablished events. - log.warn("Failed to revalidate the lock at {}. Retrying later on reconnection {}", path, - ex.getCause().getMessage()); - } - } - return null; - }); + revalidate(value, true) + .thenRun(() -> log.info("Successfully revalidated the lock on {}", path)); } synchronized CompletableFuture revalidateIfNeededAfterReconnection() { if (revalidateAfterReconnection) { revalidateAfterReconnection = false; log.warn("Revalidate lock at {} after reconnection", path); - return revalidate(value); + return revalidate(value, true); } else { return CompletableFuture.completedFuture(null); } } - synchronized CompletableFuture revalidate(T newValue) { + synchronized CompletableFuture revalidate(T newValue, boolean revalidateAfterReconnection) { + if (revalidateFuture == null || revalidateFuture.isDone()) { + revalidateFuture = doRevalidate(newValue); + } else { + if (log.isDebugEnabled()) { + log.debug("Previous revalidating is not finished while revalidate newValue={}, value={}, version={}", + newValue, value, version); + } + CompletableFuture newFuture = new CompletableFuture<>(); + revalidateFuture.whenComplete((unused, throwable) -> { + doRevalidate(newValue).thenRun(() -> newFuture.complete(null)) + .exceptionally(throwable1 -> { + 
newFuture.completeExceptionally(throwable1); + return null; + }); + }); + revalidateFuture = newFuture; + } + revalidateFuture.exceptionally(ex -> { + synchronized (ResourceLockImpl.this) { + Throwable realCause = FutureUtil.unwrapCompletionException(ex); + if (!revalidateAfterReconnection || realCause instanceof BadVersionException + || realCause instanceof LockBusyException) { + log.warn("Failed to revalidate the lock at {}. Marked as expired. {}", + path, realCause.getMessage()); + state = State.Released; + expiredFuture.complete(null); + } else { + // We failed to revalidate the lock due to connectivity issue + // Continue assuming we hold the lock, until we can revalidate it, either + // on Reconnected or SessionReestablished events. + ResourceLockImpl.this.revalidateAfterReconnection = true; + log.warn("Failed to revalidate the lock at {}. Retrying later on reconnection {}", path, + realCause.getMessage()); + } + } + return null; + }); + return revalidateFuture; + } + + private synchronized CompletableFuture doRevalidate(T newValue) { + if (log.isDebugEnabled()) { + log.debug("doRevalidate with newValue={}, version={}", newValue, version); + } return store.get(path) .thenCompose(optGetResult -> { if (!optGetResult.isPresent()) { diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/AbstractMetadataStore.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/AbstractMetadataStore.java index 4346d8ad4088a..ac7feb4747f67 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/AbstractMetadataStore.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/AbstractMetadataStore.java @@ -62,7 +62,7 @@ public abstract class AbstractMetadataStore implements MetadataStoreExtended, Co private final CopyOnWriteArrayList> listeners = new CopyOnWriteArrayList<>(); private final CopyOnWriteArrayList> sessionListeners = new CopyOnWriteArrayList<>(); - private final ExecutorService executor; + protected 
final ExecutorService executor; private final AsyncLoadingCache> childrenCache; private final AsyncLoadingCache existsCache; private final CopyOnWriteArrayList> metadataCaches = new CopyOnWriteArrayList<>(); @@ -252,7 +252,7 @@ public final CompletableFuture put(String path, byte[] data, Optional c.invalidate(path)); + metadataCaches.forEach(c -> c.refresh(path)); return stat; }); } diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java index 2cee1c7a627f6..c59ea0a940bb2 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKMetadataStore.java @@ -75,7 +75,7 @@ public ZKMetadataStore(String metadataURL, MetadataStoreConfig metadataStoreConf .sessionTimeoutMs(metadataStoreConfig.getSessionTimeoutMillis()) .watchers(Collections.singleton(event -> { if (sessionWatcher != null) { - sessionWatcher.ifPresent(sw -> sw.process(event)); + sessionWatcher.ifPresent(sw -> executor.execute(() -> sw.process(event))); } })) .build(); diff --git a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKSessionWatcher.java b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKSessionWatcher.java index 9ba6be57b4e71..51b3df4805ead 100644 --- a/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKSessionWatcher.java +++ b/pulsar-metadata/src/main/java/org/apache/pulsar/metadata/impl/ZKSessionWatcher.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.metadata.impl; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import io.netty.util.concurrent.DefaultThreadFactory; import java.util.concurrent.CompletableFuture; @@ -67,8 +68,10 @@ public ZKSessionWatcher(ZooKeeper zk, Consumer sessionListener) { this.scheduler = Executors .newSingleThreadScheduledExecutor(new 
DefaultThreadFactory("metadata-store-zk-session-watcher")); - this.task = scheduler.scheduleAtFixedRate(this::checkConnectionStatus, tickTimeMillis, tickTimeMillis, - TimeUnit.MILLISECONDS); + this.task = + scheduler.scheduleAtFixedRate(catchingAndLoggingThrowables(this::checkConnectionStatus), tickTimeMillis, + tickTimeMillis, + TimeUnit.MILLISECONDS); this.currentStatus = SessionEvent.SessionReestablished; } diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/BacklogQuotaCompatibilityTest.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/BacklogQuotaCompatibilityTest.java index 06765e307ef29..f621a3a452cbc 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/BacklogQuotaCompatibilityTest.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/BacklogQuotaCompatibilityTest.java @@ -19,15 +19,70 @@ package org.apache.pulsar.metadata; import static org.testng.Assert.assertEquals; + +import com.fasterxml.jackson.databind.JavaType; import com.fasterxml.jackson.databind.type.TypeFactory; import java.io.IOException; +import java.util.HashMap; + import org.apache.pulsar.common.policies.data.BacklogQuota; import org.apache.pulsar.common.policies.data.Policies; +import org.apache.pulsar.common.policies.data.impl.BacklogQuotaImpl; import org.apache.pulsar.metadata.cache.impl.JSONMetadataSerdeSimpleType; +import org.testng.Assert; import org.testng.annotations.Test; public class BacklogQuotaCompatibilityTest { + private final JavaType typeRef = TypeFactory.defaultInstance().constructSimpleType(Policies.class, null); + + private final JSONMetadataSerdeSimpleType simpleType = new JSONMetadataSerdeSimpleType<>(typeRef); + + private final BacklogQuota.RetentionPolicy testPolicy = BacklogQuota.RetentionPolicy.consumer_backlog_eviction; + + @Test + public void testV27ClientSetV28BrokerRead() throws Exception { + Policies writePolicy = new Policies(); + BacklogQuotaImpl writeBacklogQuota = new BacklogQuotaImpl(); + 
writeBacklogQuota.setLimit(1024); + writeBacklogQuota.setLimitTime(60); + writeBacklogQuota.setPolicy(testPolicy); + HashMap quotaHashMap = new HashMap<>(); + quotaHashMap.put(BacklogQuota.BacklogQuotaType.destination_storage, writeBacklogQuota); + writePolicy.backlog_quota_map = quotaHashMap; + byte[] serialize = simpleType.serialize("/path", writePolicy); + Policies policies = simpleType.deserialize("/path", serialize, null); + BacklogQuota readBacklogQuota = policies.backlog_quota_map.get(BacklogQuota.BacklogQuotaType.destination_storage); + Assert.assertEquals(readBacklogQuota.getLimitSize(), 1024); + Assert.assertEquals(readBacklogQuota.getLimitTime(), 60); + Assert.assertEquals(readBacklogQuota.getPolicy(), testPolicy); + } + + @Test + public void testV28ClientSetV28BrokerRead() throws Exception { + Policies writePolicy = new Policies(); + BacklogQuotaImpl writeBacklogQuota = new BacklogQuotaImpl(); + writeBacklogQuota.setLimitSize(1024); + writeBacklogQuota.setLimitTime(60); + writeBacklogQuota.setPolicy(testPolicy); + HashMap quotaHashMap = new HashMap<>(); + quotaHashMap.put(BacklogQuota.BacklogQuotaType.destination_storage, writeBacklogQuota); + writePolicy.backlog_quota_map = quotaHashMap; + byte[] serialize = simpleType.serialize("/path", writePolicy); + Policies policies = simpleType.deserialize("/path", serialize, null); + BacklogQuota readBacklogQuota = policies.backlog_quota_map.get(BacklogQuota.BacklogQuotaType.destination_storage); + Assert.assertEquals(readBacklogQuota.getLimit(), 1024); + Assert.assertEquals(readBacklogQuota.getLimitTime(), 60); + Assert.assertEquals(readBacklogQuota.getPolicy(), testPolicy); + } + + @Test + public void testV28ClientSetV27BrokerRead() { + BacklogQuotaImpl writeBacklogQuota = new BacklogQuotaImpl(); + writeBacklogQuota.setLimitSize(1024); + Assert.assertEquals(1024, writeBacklogQuota.getLimit()); + } + @Test public void testBackwardCompatibility() throws IOException { String oldPolicyStr = 
"{\"auth_policies\":{\"namespace_auth\":{},\"destination_auth\":{}," @@ -41,10 +96,7 @@ public void testBackwardCompatibility() throws IOException { + "\"schema_auto_update_compatibility_strategy\":\"Full\",\"schema_compatibility_strategy\":" + "\"UNDEFINED\",\"is_allow_auto_update_schema\":true,\"schema_validation_enforced\":false," + "\"subscription_types_enabled\":[]}\n"; - - JSONMetadataSerdeSimpleType jsonMetadataSerdeSimpleType = new JSONMetadataSerdeSimpleType( - TypeFactory.defaultInstance().constructSimpleType(Policies.class, null)); - Policies policies = (Policies) jsonMetadataSerdeSimpleType.deserialize(null, oldPolicyStr.getBytes(), null); + Policies policies = simpleType.deserialize(null, oldPolicyStr.getBytes(), null); assertEquals(policies.backlog_quota_map.get(BacklogQuota.BacklogQuotaType.destination_storage).getLimitSize(), 1001); assertEquals(policies.backlog_quota_map.get(BacklogQuota.BacklogQuotaType.destination_storage).getLimitTime(), @@ -53,4 +105,24 @@ public void testBackwardCompatibility() throws IOException { BacklogQuota.RetentionPolicy.consumer_backlog_eviction); } + @Test + public void testBackwardCompatibilityNullLimitAndLimitSize() throws IOException { + String oldPolicyStr = "{\"auth_policies\":{\"namespace_auth\":{},\"destination_auth\":{}," + + "\"subscription_auth_roles\":{}},\"replication_clusters\":[],\"backlog_quota_map\":" + + "{\"destination_storage\":{\"policy\":\"consumer_backlog_eviction\"}}," + + "\"clusterDispatchRate\":{},\"topicDispatchRate\":{},\"subscriptionDispatchRate\":{}," + + "\"replicatorDispatchRate\":{},\"clusterSubscribeRate\":{},\"publishMaxMessageRate\":{}," + + "\"latency_stats_sample_rate\":{},\"subscription_expiration_time_minutes\":0,\"deleted\":false," + + "\"encryption_required\":false,\"subscription_auth_mode\":\"None\"," + + "\"max_consumers_per_subscription\":0,\"offload_threshold\":-1," + + "\"schema_auto_update_compatibility_strategy\":\"Full\",\"schema_compatibility_strategy\":" + + 
"\"UNDEFINED\",\"is_allow_auto_update_schema\":true,\"schema_validation_enforced\":false," + + "\"subscription_types_enabled\":[]}\n"; + Policies policies = simpleType.deserialize(null, oldPolicyStr.getBytes(), null); + assertEquals(policies.backlog_quota_map.get(BacklogQuota.BacklogQuotaType.destination_storage).getLimitSize(), + 0); + assertEquals(policies.backlog_quota_map.get(BacklogQuota.BacklogQuotaType.destination_storage).getLimitTime(), + 0); + } + } diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/LeaderElectionTest.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/LeaderElectionTest.java index 8412c7129873e..c8a6fed64a61d 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/LeaderElectionTest.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/LeaderElectionTest.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.metadata; +import static org.apache.pulsar.metadata.MetadataCacheTest.assertEqualsAndRetry; import static org.testng.Assert.assertEquals; import java.util.EnumSet; import java.util.Optional; @@ -111,21 +112,21 @@ public void multipleMembers(String provider, Supplier urlSupplier) throw LeaderElectionState les1 = le1.elect("test-1").join(); assertEquals(les1, LeaderElectionState.Leading); - assertEquals(le1.getLeaderValueIfPresent(), Optional.of("test-1")); + assertEqualsAndRetry(() -> le1.getLeaderValueIfPresent(), Optional.of("test-1"), Optional.empty()); assertEquals(le1.getLeaderValue().join(), Optional.of("test-1")); assertEquals(n1.poll(3, TimeUnit.SECONDS), LeaderElectionState.Leading); LeaderElectionState les2 = le2.elect("test-2").join(); assertEquals(les2, LeaderElectionState.Following); assertEquals(le2.getLeaderValue().join(), Optional.of("test-1")); - assertEquals(le2.getLeaderValueIfPresent(), Optional.of("test-1")); + assertEqualsAndRetry(() -> le2.getLeaderValueIfPresent(), Optional.of("test-1"), Optional.empty()); assertEquals(n2.poll(3, TimeUnit.SECONDS), 
LeaderElectionState.Following); le1.close(); assertEquals(n2.poll(3, TimeUnit.SECONDS), LeaderElectionState.Leading); assertEquals(le2.getState(), LeaderElectionState.Leading); - assertEquals(le2.getLeaderValueIfPresent(), Optional.of("test-2")); + assertEqualsAndRetry(() -> le2.getLeaderValueIfPresent(), Optional.of("test-2"), Optional.empty()); assertEquals(le2.getLeaderValue().join(), Optional.of("test-2")); } @@ -209,7 +210,7 @@ public void revalidateLeaderWithinSameSession(String provider, Supplier LeaderElectionState les = le.elect("test-2").join(); assertEquals(les, LeaderElectionState.Leading); assertEquals(le.getLeaderValue().join(), Optional.of("test-2")); - assertEquals(le.getLeaderValueIfPresent(), Optional.of("test-2")); + assertEqualsAndRetry(() -> le.getLeaderValueIfPresent(), Optional.of("test-2"), Optional.empty()); } @Test(dataProvider = "impl") @@ -239,7 +240,7 @@ public void revalidateLeaderWithDifferentSessionsSameValue(String provider, Supp LeaderElectionState les = le.elect("test-1").join(); assertEquals(les, LeaderElectionState.Leading); assertEquals(le.getLeaderValue().join(), Optional.of("test-1")); - assertEquals(le.getLeaderValueIfPresent(), Optional.of("test-1")); + assertEqualsAndRetry(() -> le.getLeaderValueIfPresent(), Optional.of("test-1"), Optional.empty()); } @@ -275,6 +276,6 @@ public void revalidateLeaderWithDifferentSessionsDifferentValue(String provider, LeaderElectionState les = le.elect("test-2").join(); assertEquals(les, LeaderElectionState.Following); assertEquals(le.getLeaderValue().join(), Optional.of("test-1")); - assertEquals(le.getLeaderValueIfPresent(), Optional.of("test-1")); + assertEqualsAndRetry(() -> le.getLeaderValueIfPresent(), Optional.of("test-1"), Optional.empty()); } } diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/LockManagerTest.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/LockManagerTest.java index da080baacab58..ef94f2f3fe22a 100644 --- 
a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/LockManagerTest.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/LockManagerTest.java @@ -20,6 +20,8 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; @@ -29,6 +31,7 @@ import java.util.concurrent.CompletionException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; import lombok.Cleanup; import org.apache.pulsar.common.util.ObjectMapperFactory; @@ -286,4 +289,60 @@ public void revalidateLockOnDifferentSession(String provider, Supplier u assertEquals(new String(store1.get(path2).join().get().getValue()), "\"value-1\""); }); } + + @Test(dataProvider = "impl") + public void testCleanUpStateWhenRevalidationGotLockBusy(String provider, Supplier urlSupplier) + throws Exception { + + if (provider.equals("Memory") || provider.equals("RocksDB")) { + // Local memory provider doesn't really have the concept of multiple sessions + return; + } + + @Cleanup + MetadataStoreExtended store1 = MetadataStoreExtended.create(urlSupplier.get(), + MetadataStoreConfig.builder().build()); + @Cleanup + MetadataStoreExtended store2 = MetadataStoreExtended.create(urlSupplier.get(), + MetadataStoreConfig.builder().build()); + + @Cleanup + CoordinationService cs1 = new CoordinationServiceImpl(store1); + @Cleanup + LockManager lm1 = cs1.getLockManager(String.class); + + @Cleanup + CoordinationService cs2 = new CoordinationServiceImpl(store2); + @Cleanup + LockManager lm2 = cs2.getLockManager(String.class); + + String path1 = newKey(); + + ResourceLock lock1 = lm1.acquireLock(path1, "value-1").join(); + AtomicReference> lock2 = new AtomicReference<>(); + // lock 2 will steal the 
distributed lock first. + Awaitility.await().until(()-> { + // Ensure steal the lock success. + try { + lock2.set(lm2.acquireLock(path1, "value-1").join()); + return true; + } catch (Exception ex) { + return false; + } + }); + + // Since we can steal the lock repeatedly, we don't know which one will get it. + // But we can verify the final state. + Awaitility.await().untilAsserted(() -> { + if (lock1.getLockExpiredFuture().isDone()) { + assertTrue(lm1.listLocks(path1).join().isEmpty()); + assertFalse(lock2.get().getLockExpiredFuture().isDone()); + } else if (lock2.get().getLockExpiredFuture().isDone()) { + assertTrue(lm2.listLocks(path1).join().isEmpty()); + assertFalse(lock1.getLockExpiredFuture().isDone()); + } else { + fail("unexpected behaviour"); + } + }); + } } diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/MetadataCacheTest.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/MetadataCacheTest.java index 70ed621abd582..a2b69cd4757e1 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/MetadataCacheTest.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/MetadataCacheTest.java @@ -36,6 +36,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; import java.util.function.Supplier; import lombok.AllArgsConstructor; import lombok.Cleanup; @@ -277,6 +278,34 @@ public void insertionDeletion(String provider, Supplier urlSupplier) thr assertEquals(objCache.get(key1).join(), Optional.empty()); } + @Test(dataProvider = "impl") + public void insertionWithInvalidation(String provider, Supplier urlSupplier) throws Exception { + @Cleanup + MetadataStore store = MetadataStoreFactory.create(urlSupplier.get(), MetadataStoreConfig.builder().build()); + MetadataCache objCache = store.getMetadataCache(MyClass.class); + + String key1 = newKey(); + + 
assertEquals(objCache.getIfCached(key1), Optional.empty()); + assertEquals(objCache.get(key1).join(), Optional.empty()); + + MyClass value1 = new MyClass("a", 1); + store.put(key1, ObjectMapperFactory.getThreadLocal().writeValueAsBytes(value1), Optional.of(-1L)).join(); + + Awaitility.await().untilAsserted(() -> { + assertEquals(objCache.getIfCached(key1), Optional.of(value1)); + assertEquals(objCache.get(key1).join(), Optional.of(value1)); + }); + + MyClass value2 = new MyClass("a", 2); + store.put(key1, ObjectMapperFactory.getThreadLocal().writeValueAsBytes(value2), Optional.of(0L)).join(); + + Awaitility.await().untilAsserted(() -> { + assertEquals(objCache.getIfCached(key1), Optional.of(value2)); + assertEquals(objCache.get(key1).join(), Optional.of(value2)); + }); + } + @Test(dataProvider = "impl") public void insertionOutsideCache(String provider, Supplier urlSupplier) throws Exception { @Cleanup @@ -296,21 +325,24 @@ public void insertionOutsideCache(String provider, Supplier urlSupplier) } @Test(dataProvider = "impl") - public void insertionOutsideCacheWithGenericType(String provider, Supplier urlSupplier) throws Exception { + public void updateOutsideCacheWithGenericType(String provider, Supplier urlSupplier) throws Exception { @Cleanup MetadataStore store = MetadataStoreFactory.create(urlSupplier.get(), MetadataStoreConfig.builder().build()); MetadataCache> objCache = store.getMetadataCache(new TypeReference>() { }); String key1 = newKey(); + objCache.get(key1); Map v = new TreeMap<>(); v.put("a", "1"); v.put("b", "2"); store.put(key1, ObjectMapperFactory.getThreadLocal().writeValueAsBytes(v), Optional.of(-1L)).join(); - assertEquals(objCache.getIfCached(key1), Optional.empty()); - assertEquals(objCache.get(key1).join(), Optional.of(v)); + Awaitility.await().untilAsserted(() -> { + assertEquals(objCache.getIfCached(key1), Optional.of(v)); + assertEquals(objCache.get(key1).join(), Optional.of(v)); + }); } @Test(dataProvider = "impl") @@ -460,15 +492,21 @@ 
public void readModifyUpdateBadVersionRetry() throws Exception { MyClass value1 = new MyClass("a", 1); objCache1.create(key1, value1).join(); - objCache1.get(key1).join(); + assertEquals(objCache1.get(key1).join().get().b, 1); - objCache2.readModifyUpdate(key1, v -> { + CompletableFuture future1 = objCache1.readModifyUpdate(key1, v -> { return new MyClass(v.a, v.b + 1); - }).join(); + }); - objCache1.readModifyUpdate(key1, v -> { + CompletableFuture future2 = objCache2.readModifyUpdate(key1, v -> { return new MyClass(v.a, v.b + 1); - }).join(); + }); + + MyClass myClass1 = future1.join(); + assertEquals(myClass1.b, 2); + + MyClass myClass2 = future2.join(); + assertEquals(myClass2.b, 3); } @Test(dataProvider = "impl") @@ -555,4 +593,35 @@ public CustomClass deserialize(String path, byte[] content, Stat stat) throws IO assertEquals(res.getValue().b, 2); assertEquals(res.getValue().path, key1); } + + public static void assertEqualsAndRetry(Supplier actual, + Object expected, + Object expectedAndRetry) throws Exception { + assertEqualsAndRetry(actual, expected, expectedAndRetry, 5, 100); + } + + public static void assertEqualsAndRetry(Supplier actual, + Object expected, + Object expectedAndRetry, + int retryCount, + long intSleepTimeInMillis) throws Exception { + assertTrue(retryStrategically((__) -> { + if (actual.get().equals(expectedAndRetry)) { + return false; + } + assertEquals(actual.get(), expected); + return true; + }, retryCount, intSleepTimeInMillis)); + } + + public static boolean retryStrategically(Predicate predicate, int retryCount, long intSleepTimeInMillis) + throws Exception { + for (int i = 0; i < retryCount; i++) { + if (predicate.test(null)) { + return true; + } + Thread.sleep(intSleepTimeInMillis + (intSleepTimeInMillis * i)); + } + return false; + } } diff --git a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/TestZKServer.java b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/TestZKServer.java index 
4f83906ad96f9..9e1f7bcc4f273 100644 --- a/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/TestZKServer.java +++ b/pulsar-metadata/src/test/java/org/apache/pulsar/metadata/TestZKServer.java @@ -100,6 +100,7 @@ public void checkContainers() throws Exception { public void stop() throws Exception { if (zks != null) { zks.shutdown(); + zks.getZKDatabase().close(); zks = null; } diff --git a/pulsar-package-management/bookkeeper-storage/pom.xml b/pulsar-package-management/bookkeeper-storage/pom.xml index 32c18716e6459..a33ad930113db 100644 --- a/pulsar-package-management/bookkeeper-storage/pom.xml +++ b/pulsar-package-management/bookkeeper-storage/pom.xml @@ -25,7 +25,7 @@ pulsar-package-management org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 diff --git a/pulsar-package-management/bookkeeper-storage/src/main/java/org/apache/pulsar/packages/management/storage/bookkeeper/BookKeeperPackagesStorage.java b/pulsar-package-management/bookkeeper-storage/src/main/java/org/apache/pulsar/packages/management/storage/bookkeeper/BookKeeperPackagesStorage.java index f0db59351f5fc..e3147c0e8bc34 100644 --- a/pulsar-package-management/bookkeeper-storage/src/main/java/org/apache/pulsar/packages/management/storage/bookkeeper/BookKeeperPackagesStorage.java +++ b/pulsar-package-management/bookkeeper-storage/src/main/java/org/apache/pulsar/packages/management/storage/bookkeeper/BookKeeperPackagesStorage.java @@ -36,6 +36,7 @@ import org.apache.distributedlog.impl.metadata.BKDLConfig; import org.apache.distributedlog.metadata.DLMetadata; import org.apache.distributedlog.namespace.NamespaceDriver; +import org.apache.pulsar.client.internal.PropertiesUtils; import org.apache.pulsar.packages.management.core.PackagesStorage; import org.apache.pulsar.packages.management.core.PackagesStorageConfiguration; import org.apache.zookeeper.KeeperException; @@ -72,6 +73,13 @@ public void initialize() { configuration.getBookkeeperClientAuthenticationParameters()); } } + // Map arbitrary bookkeeper 
client configuration into DLog Config. Note that this only configures the + // bookie client. + PropertiesUtils.filterAndMapProperties(configuration.getProperties(), "bookkeeper_", "bkc.") + .forEach((key, value) -> { + log.info("Applying DLog BookKeeper client configuration setting {}={}", key, value); + conf.setProperty(key, value); + }); try { this.namespace = NamespaceBuilder.newBuilder() .conf(conf).clientId(NS_CLIENT_ID).uri(initializeDlogNamespace()).build(); diff --git a/pulsar-package-management/bookkeeper-storage/src/main/java/org/apache/pulsar/packages/management/storage/bookkeeper/BookKeeperPackagesStorageConfiguration.java b/pulsar-package-management/bookkeeper-storage/src/main/java/org/apache/pulsar/packages/management/storage/bookkeeper/BookKeeperPackagesStorageConfiguration.java index 226b80abeaa30..ce6acecdd5100 100644 --- a/pulsar-package-management/bookkeeper-storage/src/main/java/org/apache/pulsar/packages/management/storage/bookkeeper/BookKeeperPackagesStorageConfiguration.java +++ b/pulsar-package-management/bookkeeper-storage/src/main/java/org/apache/pulsar/packages/management/storage/bookkeeper/BookKeeperPackagesStorageConfiguration.java @@ -58,6 +58,10 @@ String getBookkeeperClientAuthenticationParameters() { return getProperty("bookkeeperClientAuthenticationParameters"); } + @Override + public Properties getProperties() { + return configuration.getProperties(); + } @Override public String getProperty(String key) { diff --git a/pulsar-package-management/core/pom.xml b/pulsar-package-management/core/pom.xml index f6cfbee94a8fd..199f21d7a468a 100644 --- a/pulsar-package-management/core/pom.xml +++ b/pulsar-package-management/core/pom.xml @@ -25,7 +25,7 @@ pulsar-package-management org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 diff --git a/pulsar-package-management/core/src/main/java/org/apache/pulsar/packages/management/core/PackagesStorageConfiguration.java 
b/pulsar-package-management/core/src/main/java/org/apache/pulsar/packages/management/core/PackagesStorageConfiguration.java index b4044a6338c20..5c346a0d05c42 100644 --- a/pulsar-package-management/core/src/main/java/org/apache/pulsar/packages/management/core/PackagesStorageConfiguration.java +++ b/pulsar-package-management/core/src/main/java/org/apache/pulsar/packages/management/core/PackagesStorageConfiguration.java @@ -50,4 +50,10 @@ public interface PackagesStorageConfiguration { * a group of the property */ void setProperty(Properties properties); + + /** + * Get all properties for the configuration. + * @return all properties for the configuration + */ + Properties getProperties(); } diff --git a/pulsar-package-management/core/src/main/java/org/apache/pulsar/packages/management/core/impl/DefaultPackagesStorageConfiguration.java b/pulsar-package-management/core/src/main/java/org/apache/pulsar/packages/management/core/impl/DefaultPackagesStorageConfiguration.java index cb35048a360b5..d3c5d7494b370 100644 --- a/pulsar-package-management/core/src/main/java/org/apache/pulsar/packages/management/core/impl/DefaultPackagesStorageConfiguration.java +++ b/pulsar-package-management/core/src/main/java/org/apache/pulsar/packages/management/core/impl/DefaultPackagesStorageConfiguration.java @@ -39,4 +39,9 @@ public void setProperty(String key, String value) { public void setProperty(Properties properties) { this.properties = properties; } + + @Override + public Properties getProperties() { + return this.properties; + } } diff --git a/pulsar-package-management/pom.xml b/pulsar-package-management/pom.xml index 8946793f6135e..4f0714cfd5815 100644 --- a/pulsar-package-management/pom.xml +++ b/pulsar-package-management/pom.xml @@ -25,7 +25,7 @@ pulsar org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
4.0.0 diff --git a/pulsar-proxy/pom.xml b/pulsar-proxy/pom.xml index 277f63c184e83..f0687580ee827 100644 --- a/pulsar-proxy/pom.xml +++ b/pulsar-proxy/pom.xml @@ -19,12 +19,12 @@ --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 pulsar-proxy @@ -174,10 +174,22 @@ com.beust jcommander - - org.apache.logging.log4j - log4j-core - + + + org.apache.logging.log4j + log4j-core + + + + com.github.seancfoley + ipaddress + ${seancfoley.ipaddress.version} + + + org.awaitility + awaitility + test + diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/AdminProxyHandler.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/AdminProxyHandler.java index 13f9372fa1dea..bd1bad27b2b6a 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/AdminProxyHandler.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/AdminProxyHandler.java @@ -53,6 +53,7 @@ import org.eclipse.jetty.client.RedirectProtocolHandler; import org.eclipse.jetty.client.api.ContentProvider; import org.eclipse.jetty.client.api.Request; +import org.eclipse.jetty.client.http.HttpClientTransportOverHTTP; import org.eclipse.jetty.http.HttpHeader; import org.eclipse.jetty.proxy.ProxyServlet; import org.eclipse.jetty.util.HttpCookieStore; @@ -209,12 +210,14 @@ protected ByteBuffer onRead(byte[] buffer, int offset, int length) { } private static class JettyHttpClient extends HttpClient { + private static final int NUMBER_OF_SELECTOR_THREADS = 1; + public JettyHttpClient() { - super(); + super(new HttpClientTransportOverHTTP(NUMBER_OF_SELECTOR_THREADS), null); } public JettyHttpClient(SslContextFactory sslContextFactory) { - super(sslContextFactory); + super(new HttpClientTransportOverHTTP(NUMBER_OF_SELECTOR_THREADS), sslContextFactory); } /** @@ -256,31 +259,35 @@ protected HttpClient newHttpClient() { if (config.isTlsEnabledWithBroker()) { try { - 
X509Certificate trustCertificates[] = SecurityUtility + X509Certificate[] trustCertificates = SecurityUtility .loadCertificatesFromPemFile(config.getBrokerClientTrustCertsFilePath()); SSLContext sslCtx; AuthenticationDataProvider authData = auth.getAuthData(); if (authData.hasDataForTls()) { sslCtx = SecurityUtility.createSslContext( - config.isTlsAllowInsecureConnection(), - trustCertificates, - authData.getTlsCertificates(), - authData.getTlsPrivateKey() + config.isTlsAllowInsecureConnection(), + trustCertificates, + authData.getTlsCertificates(), + authData.getTlsPrivateKey(), + config.getBrokerClientSslProvider() ); } else { sslCtx = SecurityUtility.createSslContext( - config.isTlsAllowInsecureConnection(), - trustCertificates + config.isTlsAllowInsecureConnection(), + trustCertificates, + config.getBrokerClientSslProvider() ); } - - SslContextFactory contextFactory = new SslContextFactory.Client(true); + SslContextFactory contextFactory = new SslContextFactory.Client(); contextFactory.setSslContext(sslCtx); - + if (!config.isTlsHostnameVerificationEnabled()) { + contextFactory.setEndpointIdentificationAlgorithm(null); + } return new JettyHttpClient(contextFactory); } catch (Exception e) { + LOG.error("new jetty http client exception ", e); try { auth.close(); } catch (IOException ioe) { @@ -303,7 +310,7 @@ protected String rewriteTarget(HttpServletRequest request) { boolean isFunctionsRestRequest = false; String requestUri = request.getRequestURI(); - for (String routePrefix: functionRoutes) { + for (String routePrefix : functionRoutes) { if (requestUri.startsWith(routePrefix)) { isFunctionsRestRequest = true; break; @@ -324,7 +331,7 @@ protected String rewriteTarget(HttpServletRequest request) { if (LOG.isDebugEnabled()) { LOG.debug("[{}:{}] Selected active broker is {}", request.getRemoteAddr(), request.getRemotePort(), - url.toString()); + url); } } catch (Exception e) { LOG.warn("[{}:{}] Failed to get next active broker {}", request.getRemoteAddr(), diff 
--git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/BrokerProxyValidator.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/BrokerProxyValidator.java new file mode 100644 index 0000000000000..b0529c2a777e1 --- /dev/null +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/BrokerProxyValidator.java @@ -0,0 +1,181 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.pulsar.proxy.server; + +import inet.ipaddr.IPAddress; +import inet.ipaddr.IPAddressString; +import inet.ipaddr.ipv4.IPv4Address; +import inet.ipaddr.ipv6.IPv6Address; +import io.netty.resolver.AddressResolver; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.StringTokenizer; +import java.util.concurrent.CompletableFuture; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.common.util.FutureUtil; +import org.apache.pulsar.common.util.netty.NettyFutureUtil; + +@Slf4j +public class BrokerProxyValidator { + private static final String SEPARATOR = "\\s*,\\s*"; + private static final String ALLOW_ANY = "*"; + private final int[] allowedTargetPorts; + private final boolean allowAnyTargetPort; + private final List allowedIPAddresses; + private final boolean allowAnyIPAddress; + private final AddressResolver inetSocketAddressResolver; + private final List allowedHostNames; + private final boolean allowAnyHostName; + + public BrokerProxyValidator(AddressResolver inetSocketAddressResolver, String allowedHostNames, + String allowedIPAddresses, String allowedTargetPorts) { + this.inetSocketAddressResolver = inetSocketAddressResolver; + List allowedHostNamesStrings = parseCommaSeparatedConfigValue(allowedHostNames); + if (allowedHostNamesStrings.contains(ALLOW_ANY)) { + this.allowAnyHostName = true; + this.allowedHostNames = Collections.emptyList(); + } else { + this.allowAnyHostName = false; + this.allowedHostNames = allowedHostNamesStrings.stream() + .map(BrokerProxyValidator::parseWildcardPattern).collect(Collectors.toList()); + } + List allowedIPAddressesStrings = parseCommaSeparatedConfigValue(allowedIPAddresses); + if (allowedIPAddressesStrings.contains(ALLOW_ANY)) { + allowAnyIPAddress = true; + this.allowedIPAddresses = Collections.emptyList(); + } else { 
+ allowAnyIPAddress = false; + this.allowedIPAddresses = allowedIPAddressesStrings.stream().map(IPAddressString::new) + .filter(ipAddressString -> { + if (ipAddressString.isValid()) { + return true; + } else { + throw new IllegalArgumentException("Invalid IP address filter '" + ipAddressString + "'", + ipAddressString.getAddressStringException()); + } + }).map(IPAddressString::getAddress) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + } + List allowedTargetPortsStrings = parseCommaSeparatedConfigValue(allowedTargetPorts); + if (allowedTargetPortsStrings.contains(ALLOW_ANY)) { + allowAnyTargetPort = true; + this.allowedTargetPorts = new int[0]; + } else { + allowAnyTargetPort = false; + this.allowedTargetPorts = + allowedTargetPortsStrings.stream().mapToInt(Integer::parseInt).toArray(); + } + } + + private static Pattern parseWildcardPattern(String wildcardPattern) { + String regexPattern = + Collections.list(new StringTokenizer(wildcardPattern, "*", true)) + .stream() + .map(String::valueOf) + .map(token -> { + if ("*".equals(token)) { + return ".*"; + } else { + return Pattern.quote(token); + } + }).collect(Collectors.joining()); + return Pattern.compile( + "^" + regexPattern + "$", + Pattern.CASE_INSENSITIVE); + } + + private static List parseCommaSeparatedConfigValue(String configValue) { + return Arrays.stream(configValue.split(SEPARATOR)).map(String::trim).filter(s -> s.length() > 0) + .collect(Collectors.toList()); + } + + public CompletableFuture resolveAndCheckTargetAddress(String hostAndPort) { + int pos = hostAndPort.lastIndexOf(':'); + String host = hostAndPort.substring(0, pos); + int port = Integer.parseInt(hostAndPort.substring(pos + 1)); + if (!isPortAllowed(port)) { + return FutureUtil.failedFuture( + new TargetAddressDeniedException("Given port in '" + hostAndPort + "' isn't allowed.")); + } else if (!isHostAllowed(host)) { + return FutureUtil.failedFuture( + new TargetAddressDeniedException("Given host in '" + hostAndPort + "' 
isn't allowed.")); + } else { + return NettyFutureUtil.toCompletableFuture( + inetSocketAddressResolver.resolve(InetSocketAddress.createUnresolved(host, port))) + .thenCompose(resolvedAddress -> { + CompletableFuture result = new CompletableFuture(); + if (isIPAddressAllowed(resolvedAddress)) { + result.complete(resolvedAddress); + } else { + result.completeExceptionally(new TargetAddressDeniedException( + "The IP address of the given host and port '" + hostAndPort + "' isn't allowed.")); + } + return result; + }); + } + } + + private boolean isPortAllowed(int port) { + if (allowAnyTargetPort) { + return true; + } + for (int allowedPort : allowedTargetPorts) { + if (allowedPort == port) { + return true; + } + } + return false; + } + + private boolean isIPAddressAllowed(InetSocketAddress resolvedAddress) { + if (allowAnyIPAddress) { + return true; + } + byte[] addressBytes = resolvedAddress.getAddress().getAddress(); + IPAddress candidateAddress = + addressBytes.length == 4 ? new IPv4Address(addressBytes) : new IPv6Address(addressBytes); + for (IPAddress allowedAddress : allowedIPAddresses) { + if (allowedAddress.contains(candidateAddress)) { + return true; + } + } + return false; + } + + private boolean isHostAllowed(String host) { + if (allowAnyHostName) { + return true; + } + boolean matched = false; + for (Pattern allowedHostName : allowedHostNames) { + if (allowedHostName.matcher(host).matches()) { + matched = true; + break; + } + } + return matched; + } +} diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/DirectProxyHandler.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/DirectProxyHandler.java index c896be5a62993..8ffcdb0acd5c1 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/DirectProxyHandler.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/DirectProxyHandler.java @@ -21,15 +21,14 @@ import static com.google.common.base.Preconditions.checkArgument; import static 
com.google.common.base.Preconditions.checkState; - +import static org.apache.commons.lang3.StringUtils.isEmpty; import io.netty.bootstrap.Bootstrap; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelId; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; import io.netty.channel.socket.SocketChannel; @@ -38,28 +37,20 @@ import io.netty.handler.codec.haproxy.HAProxyMessage; import io.netty.handler.codec.haproxy.HAProxyProtocolVersion; import io.netty.handler.codec.haproxy.HAProxyProxiedProtocol; +import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslHandler; +import io.netty.handler.ssl.SslProvider; +import io.netty.handler.timeout.ReadTimeoutHandler; import io.netty.util.CharsetUtil; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.FutureListener; - import java.net.InetSocketAddress; -import java.net.URI; -import java.net.URISyntaxException; import java.util.Arrays; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Supplier; - -import javax.net.ssl.SSLSession; - +import java.util.concurrent.TimeUnit; import lombok.Getter; - import org.apache.pulsar.PulsarVersion; import org.apache.pulsar.client.api.Authentication; import org.apache.pulsar.client.api.AuthenticationDataProvider; +import org.apache.pulsar.client.api.AuthenticationFactory; import org.apache.pulsar.client.api.PulsarClientException; -import org.apache.pulsar.common.tls.TlsHostnameVerifier; import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.api.AuthData; import org.apache.pulsar.common.api.proto.CommandAuthChallenge; @@ -67,40 +58,99 @@ import org.apache.pulsar.common.protocol.Commands; 
import org.apache.pulsar.common.protocol.PulsarDecoder; import org.apache.pulsar.common.stats.Rate; +import org.apache.pulsar.common.util.NettyClientSslContextRefresher; +import org.apache.pulsar.common.util.SecurityUtility; +import org.apache.pulsar.common.util.SslContextAutoRefreshBuilder; +import org.apache.pulsar.common.util.keystoretls.NettySSLContextAutoRefreshBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class DirectProxyHandler { @Getter - private Channel inboundChannel; + private final Channel inboundChannel; + private final ProxyConnection proxyConnection; @Getter Channel outboundChannel; @Getter private final Rate inboundChannelRequestsRate; - protected static Map inboundOutboundChannelMap = new ConcurrentHashMap<>(); - private String originalPrincipal; - private AuthData clientAuthData; - private String clientAuthMethod; - private int protocolVersion; + private final String originalPrincipal; + private final AuthData clientAuthData; + private final String clientAuthMethod; public static final String TLS_HANDLER = "tls"; private final Authentication authentication; - private final Supplier sslHandlerSupplier; private AuthenticationDataProvider authenticationDataProvider; - private ProxyService service; - - public DirectProxyHandler(ProxyService service, ProxyConnection proxyConnection, String targetBrokerUrl, - int protocolVersion, Supplier sslHandlerSupplier) { + private final ProxyService service; + private final Runnable onHandshakeCompleteAction; + private final boolean tlsHostnameVerificationEnabled; + private final boolean tlsEnabledWithKeyStore; + private final boolean tlsEnabledWithBroker; + private final SslContextAutoRefreshBuilder clientSslCtxRefresher; + private final NettySSLContextAutoRefreshBuilder clientSSLContextAutoRefreshBuilder; + + public DirectProxyHandler(ProxyService service, ProxyConnection proxyConnection) { this.service = service; this.authentication = proxyConnection.getClientAuthentication(); 
this.inboundChannel = proxyConnection.ctx().channel(); + this.proxyConnection = proxyConnection; this.inboundChannelRequestsRate = new Rate(); this.originalPrincipal = proxyConnection.clientAuthRole; this.clientAuthData = proxyConnection.clientAuthData; this.clientAuthMethod = proxyConnection.clientAuthMethod; - this.protocolVersion = protocolVersion; - this.sslHandlerSupplier = sslHandlerSupplier; + this.tlsEnabledWithBroker = service.getConfiguration().isTlsEnabledWithBroker(); + this.tlsHostnameVerificationEnabled = service.getConfiguration().isTlsHostnameVerificationEnabled(); + this.tlsEnabledWithKeyStore = service.getConfiguration().isTlsEnabledWithKeyStore(); + this.onHandshakeCompleteAction = proxyConnection::cancelKeepAliveTask; + ProxyConfiguration config = service.getConfiguration(); + + if (tlsEnabledWithBroker) { + AuthenticationDataProvider authData = null; + + if (!isEmpty(config.getBrokerClientAuthenticationPlugin())) { + try { + authData = AuthenticationFactory.create(config.getBrokerClientAuthenticationPlugin(), + config.getBrokerClientAuthenticationParameters()).getAuthData(); + } catch (PulsarClientException e) { + throw new RuntimeException(e); + } + } + + if (tlsEnabledWithKeyStore) { + clientSSLContextAutoRefreshBuilder = new NettySSLContextAutoRefreshBuilder( + config.getBrokerClientSslProvider(), + config.isTlsAllowInsecureConnection(), + config.getBrokerClientTlsTrustStoreType(), + config.getBrokerClientTlsTrustStore(), + config.getBrokerClientTlsTrustStorePassword(), + config.getBrokerClientTlsCiphers(), + config.getBrokerClientTlsProtocols(), + config.getTlsCertRefreshCheckDurationSec(), + authData); + clientSslCtxRefresher = null; + } else { + SslProvider sslProvider = null; + if (config.getBrokerClientSslProvider() != null) { + sslProvider = SslProvider.valueOf(config.getBrokerClientSslProvider()); + } + clientSslCtxRefresher = new NettyClientSslContextRefresher( + sslProvider, + config.isTlsAllowInsecureConnection(), + 
config.getBrokerClientTrustCertsFilePath(), + authData, + config.getBrokerClientTlsCiphers(), + config.getBrokerClientTlsProtocols(), + config.getTlsCertRefreshCheckDurationSec() + ); + clientSSLContextAutoRefreshBuilder = null; + } + } else { + clientSSLContextAutoRefreshBuilder = null; + clientSslCtxRefresher = null; + } + } + + public void connect(String brokerHostAndPort, InetSocketAddress targetBrokerAddress, int protocolVersion) { ProxyConfiguration config = service.getConfiguration(); // Start the connection attempt. @@ -109,72 +159,95 @@ public DirectProxyHandler(ProxyService service, ProxyConnection proxyConnection, // switches when passing data between the 2 // connections b.option(ChannelOption.ALLOCATOR, PulsarByteBufAllocator.DEFAULT); - b.group(inboundChannel.eventLoop()).channel(inboundChannel.getClass()).option(ChannelOption.AUTO_READ, false); + int brokerProxyConnectTimeoutMs = service.getConfiguration().getBrokerProxyConnectTimeoutMs(); + if (brokerProxyConnectTimeoutMs > 0) { + b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, brokerProxyConnectTimeoutMs); + } + b.group(inboundChannel.eventLoop()) + .channel(inboundChannel.getClass()); + + String remoteHost; + try { + remoteHost = parseHost(brokerHostAndPort); + } catch (IllegalArgumentException e) { + log.warn("[{}] Failed to parse broker host '{}'", inboundChannel, brokerHostAndPort, e); + inboundChannel.close(); + return; + } + b.handler(new ChannelInitializer() { @Override - protected void initChannel(SocketChannel ch) throws Exception { - if (sslHandlerSupplier != null) { - ch.pipeline().addLast(TLS_HANDLER, sslHandlerSupplier.get()); + protected void initChannel(SocketChannel ch) { + if (tlsEnabledWithBroker) { + String host = targetBrokerAddress.getHostString(); + int port = targetBrokerAddress.getPort(); + SslHandler handler = tlsEnabledWithKeyStore + ? 
new SslHandler(clientSSLContextAutoRefreshBuilder.get().createSSLEngine(host, port)) + : clientSslCtxRefresher.get().newHandler(ch.alloc(), host, port); + if (tlsHostnameVerificationEnabled) { + SecurityUtility.configureSSLHandler(handler); + } + ch.pipeline().addLast(TLS_HANDLER, handler); + } + int brokerProxyReadTimeoutMs = service.getConfiguration().getBrokerProxyReadTimeoutMs(); + if (brokerProxyReadTimeoutMs > 0) { + ch.pipeline().addLast("readTimeoutHandler", + new ReadTimeoutHandler(brokerProxyReadTimeoutMs, TimeUnit.MILLISECONDS)); } ch.pipeline().addLast("frameDecoder", new LengthFieldBasedFrameDecoder( Commands.DEFAULT_MAX_MESSAGE_SIZE + Commands.MESSAGE_SIZE_FRAME_PADDING, 0, 4, 0, 4)); - ch.pipeline().addLast("proxyOutboundHandler", new ProxyBackendHandler(config, protocolVersion)); + ch.pipeline().addLast("proxyOutboundHandler", + new ProxyBackendHandler(config, protocolVersion, remoteHost)); } }); - URI targetBroker; - try { - // targetBrokerUrl is coming in the "hostname:6650" form, so we need - // to extract host and port - targetBroker = new URI("pulsar://" + targetBrokerUrl); - } catch (URISyntaxException e) { - log.warn("[{}] Failed to parse broker url '{}'", inboundChannel, targetBrokerUrl, e); - inboundChannel.close(); - return; - } - - ChannelFuture f = b.connect(targetBroker.getHost(), targetBroker.getPort()); + ChannelFuture f = b.connect(targetBrokerAddress); outboundChannel = f.channel(); f.addListener(future -> { if (!future.isSuccess()) { // Close the connection if the connection attempt has failed. + log.warn("[{}] Establishing connection to {} ({}) failed. 
Closing inbound channel.", inboundChannel, + targetBrokerAddress, brokerHostAndPort, future.cause()); inboundChannel.close(); return; } - final ProxyBackendHandler cnx = (ProxyBackendHandler) outboundChannel.pipeline() - .get("proxyOutboundHandler"); - cnx.setRemoteHostName(targetBroker.getHost()); - - // if enable full parsing feature - if (service.getProxyLogLevel() == 2) { - //Set a map between inbound and outbound, - //so can find inbound by outbound or find outbound by inbound - inboundOutboundChannelMap.put(outboundChannel.id() , inboundChannel.id()); - } + }); + } - if (config.isHaProxyProtocolEnabled()) { - if (proxyConnection.hasHAProxyMessage()) { - outboundChannel.writeAndFlush(encodeProxyProtocolMessage(proxyConnection.getHAProxyMessage())); - } else { - if (inboundChannel.remoteAddress() instanceof InetSocketAddress) { - InetSocketAddress clientAddress = (InetSocketAddress) inboundChannel.remoteAddress(); - String sourceAddress = clientAddress.getAddress().getHostAddress(); - int sourcePort = clientAddress.getPort(); - if (outboundChannel.localAddress() instanceof InetSocketAddress) { - InetSocketAddress proxyAddress = (InetSocketAddress) inboundChannel.remoteAddress(); - String destinationAddress = proxyAddress.getAddress().getHostAddress(); - int destinationPort = proxyAddress.getPort(); - HAProxyMessage msg = new HAProxyMessage(HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, - HAProxyProxiedProtocol.TCP4, sourceAddress, destinationAddress, sourcePort, destinationPort); - outboundChannel.writeAndFlush(encodeProxyProtocolMessage(msg)); - msg.release(); - } - } - } + private static String parseHost(String brokerPortAndHost) { + int pos = brokerPortAndHost.lastIndexOf(':'); + if (pos > 0) { + return brokerPortAndHost.substring(0, pos); + } else { + throw new IllegalArgumentException("Illegal broker host:port '" + brokerPortAndHost + "'"); + } + } + + private void writeHAProxyMessage() { + if (proxyConnection.hasHAProxyMessage()) { + 
outboundChannel.writeAndFlush(encodeProxyProtocolMessage(proxyConnection.getHAProxyMessage())) + .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE); + } else { + if (inboundChannel.remoteAddress() instanceof InetSocketAddress + && outboundChannel.localAddress() instanceof InetSocketAddress) { + InetSocketAddress clientAddress = (InetSocketAddress) inboundChannel.remoteAddress(); + String sourceAddress = clientAddress.getAddress().getHostAddress(); + int sourcePort = clientAddress.getPort(); + InetSocketAddress proxyAddress = (InetSocketAddress) inboundChannel.remoteAddress(); + String destinationAddress = proxyAddress.getAddress().getHostAddress(); + int destinationPort = proxyAddress.getPort(); + HAProxyMessage msg = new HAProxyMessage(HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, + HAProxyProxiedProtocol.TCP4, sourceAddress, destinationAddress, sourcePort, + destinationPort); + outboundChannel.writeAndFlush(encodeProxyProtocolMessage(msg)) + .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE); + msg.release(); } - }); + } } + + private ByteBuf encodeProxyProtocolMessage(HAProxyMessage msg) { // Max length of v1 version proxy protocol message is 108 ByteBuf out = Unpooled.buffer(108); @@ -202,34 +275,55 @@ private ByteBuf encodeProxyProtocolMessage(HAProxyMessage msg) { (byte) 'Y', }; + public void close() { + if (outboundChannel != null) { + outboundChannel.close(); + } + } + enum BackendState { Init, HandshakeCompleted } - public class ProxyBackendHandler extends PulsarDecoder implements FutureListener { + public class ProxyBackendHandler extends PulsarDecoder { private BackendState state = BackendState.Init; - private String remoteHostName; + private final String remoteHostName; protected ChannelHandlerContext ctx; - private ProxyConfiguration config; - private int protocolVersion; + private final ProxyConfiguration config; + private final int protocolVersion; - public ProxyBackendHandler(ProxyConfiguration config, int protocolVersion) { 
+ public ProxyBackendHandler(ProxyConfiguration config, int protocolVersion, String remoteHostName) { this.config = config; this.protocolVersion = protocolVersion; + this.remoteHostName = remoteHostName; } @Override public void channelActive(ChannelHandlerContext ctx) throws Exception { this.ctx = ctx; + + if (config.isHaProxyProtocolEnabled()) { + writeHAProxyMessage(); + } + // Send the Connect command to broker authenticationDataProvider = authentication.getAuthData(remoteHostName); AuthData authData = authenticationDataProvider.authenticate(AuthData.INIT_AUTH_DATA); - ByteBuf command = null; + ByteBuf command; command = Commands.newConnect(authentication.getAuthMethodName(), authData, protocolVersion, "Pulsar proxy", null /* target broker */, originalPrincipal, clientAuthData, clientAuthMethod); - outboundChannel.writeAndFlush(command); - outboundChannel.read(); + outboundChannel.writeAndFlush(command) + .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE); + } + + @Override + public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception { + // handle backpressure + // stop/resume reading input from connection between the client and the proxy + // when the writability of the connection between the proxy and the broker changes + inboundChannel.config().setAutoRead(ctx.channel().isWritable()); + super.channelWritabilityChanged(ctx); } @Override @@ -250,7 +344,8 @@ public void channelRead(final ChannelHandlerContext ctx, Object msg) throws Exce if (msg instanceof ByteBuf) { ProxyService.bytesCounter.inc(((ByteBuf) msg).readableBytes()); } - inboundChannel.writeAndFlush(msg).addListener(this); + inboundChannel.writeAndFlush(msg) + .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE); break; default: @@ -289,24 +384,10 @@ protected void handleAuthChallenge(CommandAuthChallenge authChallenge) { log.debug("{} Mutual auth {}", ctx.channel(), authentication.getAuthMethodName()); } - outboundChannel.writeAndFlush(request); - 
outboundChannel.read(); + outboundChannel.writeAndFlush(request) + .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE); } catch (Exception e) { log.error("Error mutual verify", e); - return; - } - } - - @Override - public void operationComplete(Future future) throws Exception { - // This is invoked when the write operation on the paired connection - // is completed - if (future.isSuccess()) { - outboundChannel.read(); - } else { - log.warn("[{}] [{}] Failed to write on proxy connection. Closing both connections.", inboundChannel, - outboundChannel, future.cause()); - inboundChannel.close(); } } @@ -317,73 +398,61 @@ protected void messageReceived() { @Override protected void handleConnected(CommandConnected connected) { + checkArgument(state == BackendState.Init, "Unexpected state %s. BackendState.Init was expected.", state); if (log.isDebugEnabled()) { log.debug("[{}] [{}] Received Connected from broker", inboundChannel, outboundChannel); } - if (config.isTlsHostnameVerificationEnabled() && remoteHostName != null - && !verifyTlsHostName(remoteHostName, ctx)) { - // close the connection if host-verification failed with the - // broker - log.warn("[{}] Failed to verify hostname of {}", ctx.channel(), remoteHostName); - ctx.close(); - return; - } - state = BackendState.HandshakeCompleted; - ChannelFuture channelFuture; - if (connected.hasMaxMessageSize()) { - channelFuture = inboundChannel.writeAndFlush( - Commands.newConnected(connected.getProtocolVersion(), connected.getMaxMessageSize())); - } else { - channelFuture = inboundChannel.writeAndFlush(Commands.newConnected(connected.getProtocolVersion())); - } + onHandshakeCompleteAction.run(); + startDirectProxying(connected); - channelFuture.addListener(future -> { - if (service.getProxyLogLevel() == 0) { - if (log.isDebugEnabled()) { - log.debug("[{}] [{}] Removing decoder from pipeline", inboundChannel, outboundChannel); - } - // direct tcp proxy - inboundChannel.pipeline().remove("frameDecoder"); - 
outboundChannel.pipeline().remove("frameDecoder"); + proxyConnection.brokerConnected(DirectProxyHandler.this, connected); + } + + private void startDirectProxying(CommandConnected connected) { + if (service.getProxyLogLevel() == 0) { + if (log.isDebugEnabled()) { + log.debug("[{}] [{}] Removing decoder from pipeline", inboundChannel, outboundChannel); + } + // direct tcp proxy + inboundChannel.pipeline().remove("frameDecoder"); + outboundChannel.pipeline().remove("frameDecoder"); + } else { + // Enable parsing feature, proxyLogLevel(1 or 2) + // Add parser handler + if (connected.hasMaxMessageSize()) { + inboundChannel.pipeline() + .replace("frameDecoder", "newFrameDecoder", + new LengthFieldBasedFrameDecoder(connected.getMaxMessageSize() + + Commands.MESSAGE_SIZE_FRAME_PADDING, + 0, 4, 0, 4)); + outboundChannel.pipeline().replace("frameDecoder", "newFrameDecoder", + new LengthFieldBasedFrameDecoder( + connected.getMaxMessageSize() + + Commands.MESSAGE_SIZE_FRAME_PADDING, + 0, 4, 0, 4)); + + inboundChannel.pipeline().addBefore("handler", "inboundParser", + new ParserProxyHandler(service, inboundChannel, + ParserProxyHandler.FRONTEND_CONN, + connected.getMaxMessageSize(), outboundChannel.id())); + outboundChannel.pipeline().addBefore("proxyOutboundHandler", "outboundParser", + new ParserProxyHandler(service, outboundChannel, + ParserProxyHandler.BACKEND_CONN, + connected.getMaxMessageSize(), inboundChannel.id())); } else { - // Enable parsing feature, proxyLogLevel(1 or 2) - // Add parser handler - if (connected.hasMaxMessageSize()) { - inboundChannel.pipeline().replace("frameDecoder", "newFrameDecoder", - new LengthFieldBasedFrameDecoder(connected.getMaxMessageSize() - + Commands.MESSAGE_SIZE_FRAME_PADDING, - 0, 4, 0, 4)); - outboundChannel.pipeline().replace("frameDecoder", "newFrameDecoder", - new LengthFieldBasedFrameDecoder( - connected.getMaxMessageSize() - + Commands.MESSAGE_SIZE_FRAME_PADDING, 0, 4, 0, 4)); - - inboundChannel.pipeline().addBefore("handler", 
"inboundParser", - new ParserProxyHandler(service, inboundChannel, - ParserProxyHandler.FRONTEND_CONN, - connected.getMaxMessageSize())); - outboundChannel.pipeline().addBefore("proxyOutboundHandler", "outboundParser", - new ParserProxyHandler(service, outboundChannel, - ParserProxyHandler.BACKEND_CONN, - connected.getMaxMessageSize())); - } else { - inboundChannel.pipeline().addBefore("handler", "inboundParser", - new ParserProxyHandler(service, inboundChannel, - ParserProxyHandler.FRONTEND_CONN, - Commands.DEFAULT_MAX_MESSAGE_SIZE)); - outboundChannel.pipeline().addBefore("proxyOutboundHandler", "outboundParser", - new ParserProxyHandler(service, outboundChannel, - ParserProxyHandler.BACKEND_CONN, - Commands.DEFAULT_MAX_MESSAGE_SIZE)); - } + inboundChannel.pipeline().addBefore("handler", "inboundParser", + new ParserProxyHandler(service, inboundChannel, + ParserProxyHandler.FRONTEND_CONN, + Commands.DEFAULT_MAX_MESSAGE_SIZE, outboundChannel.id())); + outboundChannel.pipeline().addBefore("proxyOutboundHandler", "outboundParser", + new ParserProxyHandler(service, outboundChannel, + ParserProxyHandler.BACKEND_CONN, + Commands.DEFAULT_MAX_MESSAGE_SIZE, inboundChannel.id())); } - // Start reading from both connections - inboundChannel.read(); - outboundChannel.read(); - }); + } } @Override @@ -396,21 +465,6 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { log.warn("[{}] [{}] Caught exception: {}", inboundChannel, outboundChannel, cause.getMessage(), cause); ctx.close(); } - - public void setRemoteHostName(String remoteHostName) { - this.remoteHostName = remoteHostName; - } - - private boolean verifyTlsHostName(String hostname, ChannelHandlerContext ctx) { - ChannelHandler sslHandler = ctx.channel().pipeline().get("tls"); - - SSLSession sslSession = null; - if (sslHandler != null) { - sslSession = ((SslHandler) sslHandler).engine().getSession(); - return (new TlsHostnameVerifier()).verify(hostname, sslSession); - } - return false; - } } 
private static final Logger log = LoggerFactory.getLogger(DirectProxyHandler.class); diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/LookupProxyHandler.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/LookupProxyHandler.java index b14bea56f812c..cbebfa34fa1f4 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/LookupProxyHandler.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/LookupProxyHandler.java @@ -18,14 +18,14 @@ */ package org.apache.pulsar.proxy.server; -import static org.apache.commons.lang3.StringUtils.isBlank; - +import io.netty.buffer.ByteBuf; +import io.prometheus.client.Counter; import java.net.InetSocketAddress; import java.net.SocketAddress; import java.net.URI; import java.net.URISyntaxException; import java.util.Optional; - +import java.util.concurrent.Semaphore; import org.apache.commons.lang3.StringUtils; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.common.protocol.Commands; @@ -42,13 +42,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import io.netty.buffer.ByteBuf; -import io.prometheus.client.Counter; - public class LookupProxyHandler { private final String throttlingErrorMessage = "Too many concurrent lookup and partitionsMetadata requests"; - private final ProxyService service; private final ProxyConnection proxyConnection; + private final BrokerDiscoveryProvider discoveryProvider; private final boolean connectWithTLS; private SocketAddress clientAddress; @@ -83,9 +80,11 @@ public class LookupProxyHandler { .build("pulsar_proxy_rejected_get_topics_of_namespace_requests", "Counter of getTopicsOfNamespace requests rejected due to throttling") .create().register(); + private final Semaphore lookupRequestSemaphore; public LookupProxyHandler(ProxyService proxy, ProxyConnection proxyConnection) { - this.service = proxy; + this.discoveryProvider = proxy.getDiscoveryProvider(); + this.lookupRequestSemaphore = 
proxy.getLookupRequestSemaphore(); this.proxyConnection = proxyConnection; this.clientAddress = proxyConnection.clientAddress(); this.connectWithTLS = proxy.getConfiguration().isTlsEnabledWithBroker(); @@ -98,28 +97,16 @@ public void handleLookup(CommandLookupTopic lookup) { log.debug("Received Lookup from {}", clientAddress); } long clientRequestId = lookup.getRequestId(); - if (this.service.getLookupRequestSemaphore().tryAcquire()) { - lookupRequests.inc(); - String topic = lookup.getTopic(); - String serviceUrl; - if (isBlank(brokerServiceURL)) { - ServiceLookupData availableBroker = null; - try { - availableBroker = service.getDiscoveryProvider().nextBroker(); - } catch (Exception e) { - log.warn("[{}] Failed to get next active broker {}", clientAddress, e.getMessage(), e); - proxyConnection.ctx().writeAndFlush(Commands.newLookupErrorResponse(ServerError.ServiceNotReady, - e.getMessage(), clientRequestId)); - return; + if (lookupRequestSemaphore.tryAcquire()) { + try { + lookupRequests.inc(); + String serviceUrl = getBrokerServiceUrl(clientRequestId); + if (serviceUrl != null) { + performLookup(clientRequestId, lookup.getTopic(), serviceUrl, false, 10); } - serviceUrl = this.connectWithTLS ? availableBroker.getPulsarServiceUrlTls() - : availableBroker.getPulsarServiceUrl(); - } else { - serviceUrl = this.connectWithTLS ? 
service.getConfiguration().getBrokerServiceURLTLS() - : service.getConfiguration().getBrokerServiceURL(); + } finally { + lookupRequestSemaphore.release(); } - performLookup(clientRequestId, topic, serviceUrl, false, 10); - this.service.getLookupRequestSemaphore().release(); } else { rejectedLookupRequests.inc(); if (log.isDebugEnabled()) { @@ -164,7 +151,7 @@ private void performLookup(long clientRequestId, String topic, String brokerServ if (t != null) { log.warn("[{}] Failed to lookup topic {}: {}", clientAddress, topic, t.getMessage()); proxyConnection.ctx().writeAndFlush( - Commands.newLookupErrorResponse(ServerError.ServiceNotReady, t.getMessage(), clientRequestId)); + Commands.newLookupErrorResponse(getServerError(t), t.getMessage(), clientRequestId)); } else { String brokerUrl = connectWithTLS ? r.brokerUrlTls : r.brokerUrl; if (r.redirect) { @@ -192,7 +179,7 @@ private void performLookup(long clientRequestId, String topic, String brokerServ }).exceptionally(ex -> { // Failed to connect to backend broker proxyConnection.ctx().writeAndFlush( - Commands.newLookupErrorResponse(ServerError.ServiceNotReady, ex.getMessage(), clientRequestId)); + Commands.newLookupErrorResponse(getServerError(ex), ex.getMessage(), clientRequestId)); return null; }); } @@ -203,9 +190,12 @@ public void handlePartitionMetadataResponse(CommandPartitionedTopicMetadata part log.debug("[{}] Received PartitionMetadataLookup", clientAddress); } final long clientRequestId = partitionMetadata.getRequestId(); - if (this.service.getLookupRequestSemaphore().tryAcquire()) { - handlePartitionMetadataResponse(partitionMetadata, clientRequestId); - this.service.getLookupRequestSemaphore().release(); + if (lookupRequestSemaphore.tryAcquire()) { + try { + handlePartitionMetadataResponse(partitionMetadata, clientRequestId); + } finally { + lookupRequestSemaphore.release(); + } } else { rejectedPartitionsMetadataRequests.inc(); if (log.isDebugEnabled()) { @@ -225,20 +215,16 @@ public void 
handlePartitionMetadataResponse(CommandPartitionedTopicMetadata part private void handlePartitionMetadataResponse(CommandPartitionedTopicMetadata partitionMetadata, long clientRequestId) { TopicName topicName = TopicName.get(partitionMetadata.getTopic()); - URI brokerURI; - try { - String availableBrokerServiceURL = getBrokerServiceUrl(clientRequestId); - if (availableBrokerServiceURL == null) { - log.warn("No available broker for {} to lookup partition metadata", topicName); - return; - } - brokerURI = new URI(availableBrokerServiceURL); - } catch (URISyntaxException e) { - proxyConnection.ctx().writeAndFlush(Commands.newPartitionMetadataResponse(ServerError.MetadataError, - e.getMessage(), clientRequestId)); + + String serviceUrl = getBrokerServiceUrl(clientRequestId); + if (serviceUrl == null) { + log.warn("No available broker for {} to lookup partition metadata", topicName); + return; + } + InetSocketAddress addr = getAddr(serviceUrl, clientRequestId); + if (addr == null) { return; } - InetSocketAddress addr = new InetSocketAddress(brokerURI.getHost(), brokerURI.getPort()); if (log.isDebugEnabled()) { log.debug("Getting connections to '{}' for Looking up topic '{}' with clientReq Id '{}'", addr, @@ -263,7 +249,7 @@ private void handlePartitionMetadataResponse(CommandPartitionedTopicMetadata par }); }).exceptionally(ex -> { // Failed to connect to backend broker - proxyConnection.ctx().writeAndFlush(Commands.newPartitionMetadataResponse(ServerError.ServiceNotReady, + proxyConnection.ctx().writeAndFlush(Commands.newPartitionMetadataResponse(getServerError(ex), ex.getMessage(), clientRequestId)); return null; }); @@ -277,9 +263,12 @@ public void handleGetTopicsOfNamespace(CommandGetTopicsOfNamespace commandGetTop final long requestId = commandGetTopicsOfNamespace.getRequestId(); - if (this.service.getLookupRequestSemaphore().tryAcquire()) { - handleGetTopicsOfNamespace(commandGetTopicsOfNamespace, requestId); - this.service.getLookupRequestSemaphore().release(); + 
if (lookupRequestSemaphore.tryAcquire()) { + try { + handleGetTopicsOfNamespace(commandGetTopicsOfNamespace, requestId); + } finally { + lookupRequestSemaphore.release(); + } } else { rejectedGetTopicsOfNamespaceRequests.inc(); if (log.isDebugEnabled()) { @@ -333,7 +322,7 @@ private void performGetTopicsOfNamespace(long clientRequestId, if (t != null) { log.warn("[{}] Failed to get TopicsOfNamespace {}: {}", clientAddress, namespaceName, t.getMessage()); proxyConnection.ctx().writeAndFlush( - Commands.newError(clientRequestId, ServerError.ServiceNotReady, t.getMessage())); + Commands.newError(clientRequestId, getServerError(t), t.getMessage())); } else { proxyConnection.ctx().writeAndFlush( Commands.newGetTopicsOfNamespaceResponse(r, clientRequestId)); @@ -344,7 +333,7 @@ private void performGetTopicsOfNamespace(long clientRequestId, }).exceptionally(ex -> { // Failed to connect to backend broker proxyConnection.ctx().writeAndFlush( - Commands.newError(clientRequestId, ServerError.ServiceNotReady, ex.getMessage())); + Commands.newError(clientRequestId, getServerError(ex), ex.getMessage())); return null; }); } @@ -387,7 +376,7 @@ public void handleGetSchema(CommandGetSchema commandGetSchema) { if (t != null) { log.warn("[{}] Failed to get schema {}: {}", clientAddress, topic, t); proxyConnection.ctx().writeAndFlush( - Commands.newError(clientRequestId, ServerError.ServiceNotReady, t.getMessage())); + Commands.newError(clientRequestId, getServerError(t), t.getMessage())); } else { proxyConnection.ctx().writeAndFlush( Commands.newGetSchemaResponse(clientRequestId, r)); @@ -398,7 +387,7 @@ public void handleGetSchema(CommandGetSchema commandGetSchema) { }).exceptionally(ex -> { // Failed to connect to backend broker proxyConnection.ctx().writeAndFlush( - Commands.newError(clientRequestId, ServerError.ServiceNotReady, ex.getMessage())); + Commands.newError(clientRequestId, getServerError(ex), ex.getMessage())); return null; }); @@ -408,24 +397,20 @@ public void 
handleGetSchema(CommandGetSchema commandGetSchema) { * Get default broker service url or discovery an available broker **/ private String getBrokerServiceUrl(long clientRequestId) { - if (isBlank(brokerServiceURL)) { - ServiceLookupData availableBroker; - try { - availableBroker = service.getDiscoveryProvider().nextBroker(); - } catch (Exception e) { - log.warn("[{}] Failed to get next active broker {}", clientAddress, e.getMessage(), e); - proxyConnection.ctx().writeAndFlush(Commands.newError( - clientRequestId, ServerError.ServiceNotReady, e.getMessage() - )); - return null; - } - return this.connectWithTLS ? - availableBroker.getPulsarServiceUrlTls() : availableBroker.getPulsarServiceUrl(); - } else { - return this.connectWithTLS ? - service.getConfiguration().getBrokerServiceURLTLS() : service.getConfiguration().getBrokerServiceURL(); + if (StringUtils.isNotBlank(brokerServiceURL)) { + return brokerServiceURL; } - + ServiceLookupData availableBroker; + try { + availableBroker = discoveryProvider.nextBroker(); + } catch (Exception e) { + log.warn("[{}] Failed to get next active broker {}", clientAddress, e.getMessage(), e); + proxyConnection.ctx().writeAndFlush(Commands.newError( + clientRequestId, ServerError.ServiceNotReady, e.getMessage() + )); + return null; + } + return this.connectWithTLS ? 
availableBroker.getPulsarServiceUrlTls() : availableBroker.getPulsarServiceUrl(); } private InetSocketAddress getAddr(String brokerServiceUrl, long clientRequestId) { diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ParserProxyHandler.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ParserProxyHandler.java index 9ce7a26d99c00..b5681c64bc7c9 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ParserProxyHandler.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ParserProxyHandler.java @@ -19,6 +19,7 @@ package org.apache.pulsar.proxy.server; +import io.netty.channel.ChannelId; import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map; @@ -56,6 +57,7 @@ public class ParserProxyHandler extends ChannelInboundHandlerAdapter { private String connType; private int maxMessageSize; + private final ChannelId peerChannelId; private final ProxyService service; @@ -64,11 +66,13 @@ public class ParserProxyHandler extends ChannelInboundHandlerAdapter { private static Map producerHashMap = new ConcurrentHashMap<>(); private static Map consumerHashMap = new ConcurrentHashMap<>(); - public ParserProxyHandler(ProxyService service, Channel channel, String type, int maxMessageSize) { + public ParserProxyHandler(ProxyService service, Channel channel, String type, int maxMessageSize, + ChannelId peerChannelId) { this.service = service; this.channel = channel; this.connType = type; this.maxMessageSize = maxMessageSize; + this.peerChannelId = peerChannelId; } private void logging(Channel conn, BaseCommand.Type cmdtype, String info, List messages) throws Exception{ @@ -143,7 +147,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) { logging(ctx.channel() , cmd.getType() , "" , null); break; } - topicName = TopicName.get(ParserProxyHandler.consumerHashMap.get(String.valueOf(cmd.getMessage().getConsumerId()) + "," + 
DirectProxyHandler.inboundOutboundChannelMap.get(ctx.channel().id()))); + topicName = TopicName.get(ParserProxyHandler.consumerHashMap.get(cmd.getMessage().getConsumerId() + "," + peerChannelId)); msgBytes = new MutableLong(0); MessageParser.parseMessage(topicName, -1L, -1L,buffer,(message) -> { diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyClientCnx.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyClientCnx.java index 665b9f83fd604..283b835fff54f 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyClientCnx.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyClientCnx.java @@ -18,47 +18,95 @@ */ package org.apache.pulsar.proxy.server; +import static com.google.common.base.Preconditions.checkArgument; import io.netty.buffer.ByteBuf; import io.netty.channel.EventLoopGroup; - +import java.util.Arrays; +import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.PulsarVersion; import org.apache.pulsar.client.impl.ClientCnx; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; import org.apache.pulsar.common.api.AuthData; +import org.apache.pulsar.common.api.proto.CommandAuthChallenge; import org.apache.pulsar.common.protocol.Commands; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +@Slf4j public class ProxyClientCnx extends ClientCnx { - - String clientAuthRole; - AuthData clientAuthData; - String clientAuthMethod; - int protocolVersion; + private final boolean forwardClientAuthData; + private final String clientAuthMethod; + private final String clientAuthRole; + private final AuthData clientAuthData; + private final ProxyConnection proxyConnection; public ProxyClientCnx(ClientConfigurationData conf, EventLoopGroup eventLoopGroup, String clientAuthRole, - AuthData clientAuthData, String clientAuthMethod, int protocolVersion) { - super(conf, eventLoopGroup); + AuthData clientAuthData, String clientAuthMethod, int protocolVersion, + boolean 
forwardClientAuthData, ProxyConnection proxyConnection) { + super(conf, eventLoopGroup, protocolVersion); this.clientAuthRole = clientAuthRole; this.clientAuthData = clientAuthData; this.clientAuthMethod = clientAuthMethod; - this.protocolVersion = protocolVersion; + this.forwardClientAuthData = forwardClientAuthData; + this.proxyConnection = proxyConnection; } @Override protected ByteBuf newConnectCommand() throws Exception { if (log.isDebugEnabled()) { - log.debug("New Connection opened via ProxyClientCnx with params clientAuthRole = {}," + - " clientAuthData = {}, clientAuthMethod = {}", + log.debug("New Connection opened via ProxyClientCnx with params clientAuthRole = {}," + + " clientAuthData = {}, clientAuthMethod = {}", clientAuthRole, clientAuthData, clientAuthMethod); } authenticationDataProvider = authentication.getAuthData(remoteHostName); AuthData authData = authenticationDataProvider.authenticate(AuthData.INIT_AUTH_DATA); - return Commands.newConnect(authentication.getAuthMethodName(), authData, this.protocolVersion, - PulsarVersion.getVersion(), proxyToTargetBrokerAddress, clientAuthRole, clientAuthData, - clientAuthMethod); + return Commands.newConnect(authentication.getAuthMethodName(), authData, protocolVersion, + PulsarVersion.getVersion(), proxyToTargetBrokerAddress, clientAuthRole, clientAuthData, + clientAuthMethod); } - private static final Logger log = LoggerFactory.getLogger(ProxyClientCnx.class); + @Override + protected void handleAuthChallenge(CommandAuthChallenge authChallenge) { + checkArgument(authChallenge.hasChallenge()); + checkArgument(authChallenge.getChallenge().hasAuthData()); + + boolean isRefresh = Arrays.equals(AuthData.REFRESH_AUTH_DATA_BYTES, authChallenge.getChallenge().getAuthData()); + if (!forwardClientAuthData || !isRefresh) { + super.handleAuthChallenge(authChallenge); + return; + } + + try { + if (log.isDebugEnabled()) { + log.debug("Proxy {} request to refresh the original client authentication data for " + + "the 
proxy client {}", proxyConnection.ctx().channel(), ctx.channel()); + } + + proxyConnection.ctx().writeAndFlush(Commands.newAuthChallenge(clientAuthMethod, AuthData.REFRESH_AUTH_DATA, + protocolVersion)) + .addListener(writeFuture -> { + if (writeFuture.isSuccess()) { + if (log.isDebugEnabled()) { + log.debug("Proxy {} sent the auth challenge to original client to refresh credentials " + + "with method {} for the proxy client {}", + proxyConnection.ctx().channel(), clientAuthMethod, ctx.channel()); + } + } else { + log.error("Failed to send the auth challenge to original client by the proxy {} " + + "for the proxy client {}", + proxyConnection.ctx().channel(), + ctx.channel(), + writeFuture.cause()); + closeWithException(writeFuture.cause()); + } + }); + + if (state == State.SentConnectFrame) { + state = State.Connecting; + } + } catch (Exception e) { + log.error("Failed to send the auth challenge to origin client by the proxy {} for the proxy client {}", + proxyConnection.ctx().channel(), ctx.channel(), e); + closeWithException(e); + } + } } diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConfiguration.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConfiguration.java index 0573670e993ae..d616bbd1ce423 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConfiguration.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConfiguration.java @@ -26,6 +26,7 @@ import java.util.Optional; import java.util.Properties; import java.util.Set; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -49,6 +50,8 @@ public class ProxyConfiguration implements PulsarConfiguration { @Category private static final String CATEGORY_BROKER_DISCOVERY = "Broker Discovery"; @Category + private static final String CATEGORY_BROKER_PROXY = "Broker Proxy"; + @Category private static final String CATEGORY_AUTHENTICATION = "Proxy 
Authentication"; @Category private static final String CATEGORY_AUTHORIZATION = "Proxy Authorization"; @@ -136,6 +139,43 @@ public class ProxyConfiguration implements PulsarConfiguration { ) private String functionWorkerWebServiceURLTLS; + @FieldContext(category = CATEGORY_BROKER_PROXY, + doc = "When enabled, checks that the target broker is active before connecting. " + + "zookeeperServers and configurationStoreServers must be configured in proxy configuration " + + "for retrieving the active brokers.") + private boolean checkActiveBrokers = false; + + @FieldContext( + category = CATEGORY_BROKER_PROXY, + doc = "Broker proxy connect timeout.\n" + + "The timeout value for Broker proxy connect timeout is in millisecond. Set to 0 to disable." + ) + private int brokerProxyConnectTimeoutMs = 10000; + + @FieldContext( + category = CATEGORY_BROKER_PROXY, + doc = "Broker proxy read timeout.\n" + + "The timeout value for Broker proxy read timeout is in millisecond. Set to 0 to disable." + ) + private int brokerProxyReadTimeoutMs = 75000; + + @FieldContext( + category = CATEGORY_BROKER_PROXY, + doc = "Allowed broker target host names. " + + "Supports multiple comma separated entries and a wildcard.") + private String brokerProxyAllowedHostNames = "*"; + + @FieldContext( + category = CATEGORY_BROKER_PROXY, + doc = "Allowed broker target ip addresses or ip networks / netmasks. 
" + + "Supports multiple comma separated entries.") + private String brokerProxyAllowedIPAddresses = "*"; + + @FieldContext( + category = CATEGORY_BROKER_PROXY, + doc = "Allowed broker target ports") + private String brokerProxyAllowedTargetPorts = "6650,6651"; + @FieldContext( category = CATEGORY_SERVER, doc = "Hostname or IP address the service binds on" @@ -175,6 +215,26 @@ public class ProxyConfiguration implements PulsarConfiguration { ) private Optional webServicePortTls = Optional.empty(); + @FieldContext( + category = CATEGORY_KEYSTORE_TLS, + doc = "Specify the TLS provider for the web service, available values can be SunJSSE, Conscrypt and etc." + ) + private String webServiceTlsProvider = "Conscrypt"; + + @FieldContext( + category = CATEGORY_TLS, + doc = "Specify the tls protocols the proxy's web service will use to negotiate during TLS Handshake.\n\n" + + "Example:- [TLSv1.3, TLSv1.2]" + ) + private Set webServiceTlsProtocols = new TreeSet<>(); + + @FieldContext( + category = CATEGORY_TLS, + doc = "Specify the tls cipher the proxy's web service will use to negotiate during TLS Handshake.\n\n" + + "Example:- [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]" + ) + private Set webServiceTlsCiphers = new TreeSet<>(); + @FieldContext( category = CATEGORY_SERVER, doc = "The directory where nar Extraction happens" @@ -371,7 +431,9 @@ public class ProxyConfiguration implements PulsarConfiguration { @FieldContext( category = CATEGORY_KEYSTORE_TLS, - doc = "TLS Provider" + doc = "Specify the TLS provider for the broker service: \n" + + "When using TLS authentication with CACert, the valid value is either OPENSSL or JDK.\n" + + "When using TLS authentication with KeyStore, available values can be SunJSSE, Conscrypt and etc." 
) private String tlsProvider = null; @@ -505,6 +567,20 @@ public class ProxyConfiguration implements PulsarConfiguration { ) private int httpNumThreads = Math.max(8, 2 * Runtime.getRuntime().availableProcessors()); + @FieldContext( + category = CATEGORY_SERVER, + doc = "Number of threads used for Netty IO." + + " Default is set to `2 * Runtime.getRuntime().availableProcessors()`" + ) + private int numIOThreads = 2 * Runtime.getRuntime().availableProcessors(); + + @FieldContext( + category = CATEGORY_SERVER, + doc = "Number of threads used for Netty Acceptor." + + " Default is set to `1`" + ) + private int numAcceptorThreads = 1; + @Deprecated @FieldContext( category = CATEGORY_PLUGIN, diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConnection.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConnection.java index f1b780726489c..bb8f26787455a 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConnection.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConnection.java @@ -19,65 +19,76 @@ package org.apache.pulsar.proxy.server; import static com.google.common.base.Preconditions.checkArgument; - +import static com.google.common.base.Preconditions.checkState; +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.haproxy.HAProxyMessage; +import io.netty.handler.ssl.SslHandler; +import io.netty.resolver.dns.DnsAddressResolverGroup; +import java.net.InetSocketAddress; import java.net.SocketAddress; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; - +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; 
import javax.naming.AuthenticationException; import javax.net.ssl.SSLSession; - -import io.netty.handler.codec.haproxy.HAProxyMessage; +import lombok.Getter; +import org.apache.pulsar.PulsarVersion; +import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.authentication.AuthenticationDataSource; import org.apache.pulsar.broker.authentication.AuthenticationProvider; import org.apache.pulsar.broker.authentication.AuthenticationState; import org.apache.pulsar.client.api.Authentication; -import org.apache.pulsar.client.api.AuthenticationFactory; import org.apache.pulsar.client.api.PulsarClientException; -import org.apache.pulsar.client.api.PulsarClientException.UnsupportedAuthenticationException; import org.apache.pulsar.client.impl.ClientCnx; import org.apache.pulsar.client.impl.ConnectionPool; import org.apache.pulsar.client.impl.PulsarChannelInitializer; -import org.apache.pulsar.client.impl.PulsarClientImpl; import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import org.apache.pulsar.client.impl.conf.ConfigurationDataUtils; +import org.apache.pulsar.client.internal.PropertiesUtils; import org.apache.pulsar.common.api.AuthData; -import org.apache.pulsar.common.protocol.Commands; -import org.apache.pulsar.common.protocol.PulsarHandler; import org.apache.pulsar.common.api.proto.CommandAuthResponse; import org.apache.pulsar.common.api.proto.CommandConnect; +import org.apache.pulsar.common.api.proto.CommandConnected; +import org.apache.pulsar.common.api.proto.CommandGetSchema; import org.apache.pulsar.common.api.proto.CommandGetTopicsOfNamespace; import org.apache.pulsar.common.api.proto.CommandLookupTopic; -import org.apache.pulsar.common.api.proto.CommandGetSchema; import org.apache.pulsar.common.api.proto.CommandPartitionedTopicMetadata; import org.apache.pulsar.common.api.proto.ProtocolVersion; import org.apache.pulsar.common.api.proto.ServerError; +import org.apache.pulsar.common.protocol.Commands; +import 
org.apache.pulsar.common.protocol.PulsarHandler; +import org.apache.pulsar.policies.data.loadbalancer.ServiceLookupData; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.ssl.SslHandler; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.FutureListener; -import lombok.Getter; - /** * Handles incoming discovery request from client and sends appropriate response back to client * */ -public class ProxyConnection extends PulsarHandler implements FutureListener { +public class ProxyConnection extends PulsarHandler { + private static final Logger LOG = LoggerFactory.getLogger(ProxyConnection.class); // ConnectionPool is used by the proxy to issue lookup requests - private PulsarClientImpl client; private ConnectionPool connectionPool; - private ProxyService service; + private final AtomicLong requestIdGenerator = + new AtomicLong(ThreadLocalRandom.current().nextLong(0, Long.MAX_VALUE / 2)); + private final ProxyService service; + private final DnsAddressResolverGroup dnsAddressResolverGroup; AuthenticationDataSource authenticationData; private State state; - private final Supplier sslHandlerSupplier; private LookupProxyHandler lookupProxyHandler = null; @Getter private DirectProxyHandler directProxyHandler = null; + private final BrokerProxyValidator brokerProxyValidator; String clientAuthRole; AuthData clientAuthData; String clientAuthMethod; @@ -104,23 +115,29 @@ enum State { // Follow redirects ProxyLookupRequests, + // Connecting to the broker + ProxyConnectingToBroker, + // If we are proxying a connection to a specific broker, we // are just forwarding data between the 2 connections, without // looking into it ProxyConnectionToBroker, + Closing, + Closed, } ConnectionPool getConnectionPool() { - return client.getCnxPool(); + return connectionPool; } - public ProxyConnection(ProxyService 
proxyService, Supplier sslHandlerSupplier) { + public ProxyConnection(ProxyService proxyService, DnsAddressResolverGroup dnsAddressResolverGroup) { super(30, TimeUnit.SECONDS); this.service = proxyService; + this.dnsAddressResolverGroup = dnsAddressResolverGroup; this.state = State.Init; - this.sslHandlerSupplier = sslHandlerSupplier; + this.brokerProxyValidator = service.getBrokerProxyValidator(); } @Override @@ -128,9 +145,9 @@ public void channelRegistered(ChannelHandlerContext ctx) throws Exception { super.channelRegistered(ctx); ProxyService.activeConnections.inc(); if (ProxyService.activeConnections.get() > service.getConfiguration().getMaxConcurrentInboundConnections()) { + state = State.Closing; ctx.close(); ProxyService.rejectedConnections.inc(); - return; } } @@ -149,34 +166,58 @@ public void channelActive(ChannelHandlerContext ctx) throws Exception { } @Override - public void channelInactive(ChannelHandlerContext ctx) throws Exception { + public synchronized void channelInactive(ChannelHandlerContext ctx) throws Exception { super.channelInactive(ctx); - if (directProxyHandler != null && directProxyHandler.outboundChannel != null) { - directProxyHandler.outboundChannel.close(); + if (directProxyHandler != null) { + directProxyHandler.close(); + directProxyHandler = null; } - if (client != null) { - client.close(); - } service.getClientCnxs().remove(this); LOG.info("[{}] Connection closed", remoteAddress); if (connectionPool != null) { try { connectionPool.close(); + connectionPool = null; } catch (Exception e) { LOG.error("Failed to close connection pool {}", e.getMessage(), e); } } + + state = State.Closed; } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { super.exceptionCaught(ctx, cause); - LOG.warn("[{}] Got exception {} : {} {}", remoteAddress, cause.getClass().getSimpleName(), cause.getMessage(), + LOG.warn("[{}] Got exception {} : Message: {} State: {}", remoteAddress, 
cause.getClass().getSimpleName(), + cause.getMessage(), state, ClientCnx.isKnownException(cause) ? null : cause); - ctx.close(); + if (state != State.Closed) { + state = State.Closing; + } + if (ctx.channel().isOpen()) { + ctx.close(); + } else { + // close connection to broker if that is present + if (directProxyHandler != null) { + directProxyHandler.close(); + directProxyHandler = null; + } + } + } + + @Override + public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception { + if (directProxyHandler != null && directProxyHandler.outboundChannel != null) { + // handle backpressure + // stop/resume reading input from connection between the proxy and the broker + // when the writability of the connection between the client and the proxy changes + directProxyHandler.outboundChannel.config().setAutoRead(ctx.channel().isWritable()); + } + super.channelWritabilityChanged(ctx); } @Override @@ -194,87 +235,170 @@ public void channelRead(final ChannelHandlerContext ctx, Object msg) throws Exce break; case ProxyConnectionToBroker: - // Pass the buffer to the outbound connection and schedule next read - // only if we can write on the connection - ProxyService.opsCounter.inc(); - if (msg instanceof ByteBuf) { - int bytes = ((ByteBuf) msg).readableBytes(); - directProxyHandler.getInboundChannelRequestsRate().recordEvent(bytes); - ProxyService.bytesCounter.inc(bytes); + if (directProxyHandler != null) { + ProxyService.opsCounter.inc(); + if (msg instanceof ByteBuf) { + int bytes = ((ByteBuf) msg).readableBytes(); + directProxyHandler.getInboundChannelRequestsRate().recordEvent(bytes); + ProxyService.bytesCounter.inc(bytes); + } + directProxyHandler.outboundChannel.writeAndFlush(msg) + .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE); + } else { + LOG.warn("Received message of type {} while connection to broker is missing in state {}. " + + "Dropping the input message (readable bytes={}).", msg.getClass(), state, + msg instanceof ByteBuf ? 
((ByteBuf) msg).readableBytes() : -1); } - directProxyHandler.outboundChannel.writeAndFlush(msg).addListener(this); break; - + case ProxyConnectingToBroker: + LOG.warn("Received message of type {} while connecting to broker. " + + "Dropping the input message (readable bytes={}).", msg.getClass(), + msg instanceof ByteBuf ? ((ByteBuf) msg).readableBytes() : -1); + break; default: break; } } - @Override - public void operationComplete(Future future) throws Exception { - // This is invoked when the write operation on the paired connection is - // completed - if (future.isSuccess()) { - ctx.read(); + private synchronized void completeConnect(AuthData clientData) throws PulsarClientException { + Supplier clientCnxSupplier; + if (service.getConfiguration().isAuthenticationEnabled()) { + if (service.getConfiguration().isForwardAuthorizationCredentials()) { + this.clientAuthData = clientData; + this.clientAuthMethod = authMethod; + } + clientCnxSupplier = () -> new ProxyClientCnx(clientConf, service.getWorkerGroup(), clientAuthRole, + clientAuthData, clientAuthMethod, protocolVersionToAdvertise, + service.getConfiguration().isForwardAuthorizationCredentials(), this); } else { - LOG.warn("[{}] Error in writing to inbound channel. Closing", remoteAddress, future.cause()); - directProxyHandler.outboundChannel.close(); + clientCnxSupplier = () -> new ClientCnx(clientConf, service.getWorkerGroup(), protocolVersionToAdvertise); + } + + if (this.connectionPool == null) { + this.connectionPool = new ConnectionPool(clientConf, service.getWorkerGroup(), + clientCnxSupplier, + Optional.of(dnsAddressResolverGroup.getResolver(service.getWorkerGroup().next()))); + } else { + LOG.error("BUG! Connection Pool has already been created for proxy connection to {} state {} role {}", + remoteAddress, state, clientAuthRole); } - } - private void completeConnect() { LOG.info("[{}] complete connection, init proxy handler. 
authenticated with {} role {}, hasProxyToBrokerUrl: {}", - remoteAddress, authMethod, clientAuthRole, hasProxyToBrokerUrl); + remoteAddress, authMethod, clientAuthRole, hasProxyToBrokerUrl); if (hasProxyToBrokerUrl) { - // Client already knows which broker to connect. Let's open a - // connection there and just pass bytes in both directions - state = State.ProxyConnectionToBroker; - directProxyHandler = new DirectProxyHandler(service, this, proxyToBrokerUrl, - protocolVersionToAdvertise, sslHandlerSupplier); - cancelKeepAliveTask(); + // Optimize proxy connection to fail-fast if the target broker isn't active + // Pulsar client will retry connecting after a back off timeout + if (service.getConfiguration().isCheckActiveBrokers() + && !isBrokerActive(proxyToBrokerUrl)) { + state = State.Closing; + LOG.warn("[{}] Target broker '{}' isn't available. authenticated with {} role {}.", + remoteAddress, proxyToBrokerUrl, authMethod, clientAuthRole); + ctx() + .writeAndFlush( + Commands.newError(-1, ServerError.ServiceNotReady, "Target broker isn't available.")) + .addListener(ChannelFutureListener.CLOSE); + return; + } + + state = State.ProxyConnectingToBroker; + brokerProxyValidator.resolveAndCheckTargetAddress(proxyToBrokerUrl) + .thenAcceptAsync(this::connectToBroker, ctx.executor()) + .exceptionally(throwable -> { + if (throwable instanceof TargetAddressDeniedException + || throwable.getCause() instanceof TargetAddressDeniedException) { + TargetAddressDeniedException targetAddressDeniedException = + (TargetAddressDeniedException) (throwable instanceof TargetAddressDeniedException + ? throwable : throwable.getCause()); + + LOG.warn("[{}] Target broker '{}' cannot be validated. {}. authenticated with {} role {}.", + remoteAddress, proxyToBrokerUrl, targetAddressDeniedException.getMessage(), + authMethod, clientAuthRole); + } else { + LOG.error("[{}] Error validating target broker '{}'. 
authenticated with {} role {}.", + remoteAddress, proxyToBrokerUrl, authMethod, clientAuthRole, throwable); + } + ctx() + .writeAndFlush( + Commands.newError(-1, ServerError.ServiceNotReady, + "Target broker cannot be validated.")) + .addListener(ChannelFutureListener.CLOSE); + return null; + }); } else { // Client is doing a lookup, we can consider the handshake complete // and we'll take care of just topics and // partitions metadata lookups state = State.ProxyLookupRequests; lookupProxyHandler = new LookupProxyHandler(service, this); - ctx.writeAndFlush(Commands.newConnected(protocolVersionToAdvertise)); + ctx.writeAndFlush(Commands.newConnected(protocolVersionToAdvertise)) + .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE); } } - private void createClientAndCompleteConnect(AuthData clientData) - throws PulsarClientException { - if (service.getConfiguration().isForwardAuthorizationCredentials()) { - this.clientAuthData = clientData; - this.clientAuthMethod = authMethod; + private void handleBrokerConnected(DirectProxyHandler directProxyHandler, CommandConnected connected) { + checkState(ctx.executor().inEventLoop(), "This method should be called in the event loop"); + if (state == State.ProxyConnectingToBroker && ctx.channel().isOpen() && this.directProxyHandler == null) { + this.directProxyHandler = directProxyHandler; + state = State.ProxyConnectionToBroker; + int maxMessageSize = + connected.hasMaxMessageSize() ? connected.getMaxMessageSize() : Commands.INVALID_MAX_MESSAGE_SIZE; + ctx.writeAndFlush(Commands.newConnected(connected.getProtocolVersion(), maxMessageSize)) + .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE); + } else { + LOG.warn("[{}] Channel is {}. ProxyConnection is in {}. " + + "Closing connection to broker '{}'.", + remoteAddress, ctx.channel().isOpen() ? "open" : "already closed", + state != State.ProxyConnectingToBroker ? 
"invalid state " + state : "state " + state, + proxyToBrokerUrl); + directProxyHandler.close(); + ctx.close(); } - this.client = createClient(clientConf, this.clientAuthData, this.clientAuthMethod, protocolVersionToAdvertise); + } - completeConnect(); + private void connectToBroker(InetSocketAddress brokerAddress) { + checkState(ctx.executor().inEventLoop(), "This method should be called in the event loop"); + DirectProxyHandler directProxyHandler = new DirectProxyHandler(service, this); + directProxyHandler.connect(proxyToBrokerUrl, brokerAddress, protocolVersionToAdvertise); + } + + public void brokerConnected(DirectProxyHandler directProxyHandler, CommandConnected connected) { + try { + final CommandConnected finalConnected = new CommandConnected().copyFrom(connected); + ctx.executor().submit(() -> handleBrokerConnected(directProxyHandler, finalConnected)); + } catch (RejectedExecutionException e) { + LOG.error("Event loop was already closed. Closing broker connection.", e); + directProxyHandler.close(); + } } // According to auth result, send newConnected or newAuthChallenge command. - private void doAuthentication(AuthData clientData) throws Exception { + private void doAuthentication(AuthData clientData) + throws Exception { AuthData brokerData = authState.authenticate(clientData); // authentication has completed, will send newConnected command. if (authState.isComplete()) { clientAuthRole = authState.getAuthRole(); if (LOG.isDebugEnabled()) { LOG.debug("[{}] Client successfully authenticated with {} role {}", - remoteAddress, authMethod, clientAuthRole); + remoteAddress, authMethod, clientAuthRole); + } + + // First connection + if (this.connectionPool == null || state == State.Connecting) { + // authentication has completed, will send newConnected command. + completeConnect(clientData); } - createClientAndCompleteConnect(clientData); return; } // auth not complete, continue auth with client side. 
- ctx.writeAndFlush(Commands.newAuthChallenge(authMethod, brokerData, protocolVersionToAdvertise)); + ctx.writeAndFlush(Commands.newAuthChallenge(authMethod, brokerData, protocolVersionToAdvertise)) + .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE); if (LOG.isDebugEnabled()) { LOG.debug("[{}] Authentication in progress client by method {}.", - remoteAddress, authMethod); + remoteAddress, authMethod); } state = State.Connecting; - return; } @Override @@ -295,6 +419,7 @@ remoteAddress, protocolVersionToAdvertise, getRemoteEndpointProtocolVersion(), if (getRemoteEndpointProtocolVersion() < ProtocolVersion.v10.getValue()) { LOG.warn("[{}] Client doesn't support connecting through proxy", remoteAddress); + state = State.Closing; ctx.close(); return; } @@ -302,16 +427,10 @@ remoteAddress, protocolVersionToAdvertise, getRemoteEndpointProtocolVersion(), try { // init authn this.clientConf = createClientConfiguration(); - int protocolVersion = getProtocolVersionToAdvertise(connect); // authn not enabled, complete if (!service.getConfiguration().isAuthenticationEnabled()) { - this.connectionPool = new ProxyConnectionPool(clientConf, service.getWorkerGroup(), - () -> new ClientCnx(clientConf, service.getWorkerGroup(), protocolVersion)); - this.client = - new PulsarClientImpl(clientConf, service.getWorkerGroup(), connectionPool, service.getTimer()); - - completeConnect(); + completeConnect(null); return; } @@ -336,7 +455,7 @@ remoteAddress, protocolVersionToAdvertise, getRemoteEndpointProtocolVersion(), .orElseThrow(() -> new AuthenticationException("No anonymous role, and no authentication provider configured")); - createClientAndCompleteConnect(clientData); + completeConnect(clientData); return; } @@ -352,31 +471,75 @@ remoteAddress, protocolVersionToAdvertise, getRemoteEndpointProtocolVersion(), doAuthentication(clientData); } catch (Exception e) { LOG.warn("[{}] Unable to authenticate: ", remoteAddress, e); - ctx.writeAndFlush(Commands.newError(-1, 
ServerError.AuthenticationError, "Failed to authenticate")); - close(); - return; + ctx.writeAndFlush(Commands.newError(-1, ServerError.AuthenticationError, "Failed to authenticate")) + .addListener(ChannelFutureListener.CLOSE); } } @Override protected void handleAuthResponse(CommandAuthResponse authResponse) { - checkArgument(state == State.Connecting); checkArgument(authResponse.hasResponse()); checkArgument(authResponse.getResponse().hasAuthData() && authResponse.getResponse().hasAuthMethodName()); if (LOG.isDebugEnabled()) { LOG.debug("Received AuthResponse from {}, auth method: {}", - remoteAddress, authResponse.getResponse().getAuthMethodName()); + remoteAddress, authResponse.getResponse().getAuthMethodName()); } try { AuthData clientData = AuthData.of(authResponse.getResponse().getAuthData()); doAuthentication(clientData); + if (service.getConfiguration().isForwardAuthorizationCredentials() + && connectionPool != null && state == State.ProxyLookupRequests) { + connectionPool.getConnections().forEach(toBrokerCnxFuture -> { + String clientVersion; + if (authResponse.hasClientVersion()) { + clientVersion = authResponse.getClientVersion(); + } else { + clientVersion = PulsarVersion.getVersion(); + } + int protocolVersion; + if (authResponse.hasProtocolVersion()) { + protocolVersion = authResponse.getProtocolVersion(); + } else { + protocolVersion = Commands.getCurrentProtocolVersion(); + } + + ByteBuf cmd = + Commands.newAuthResponse(clientAuthMethod, clientData, protocolVersion, clientVersion); + toBrokerCnxFuture.thenAccept(toBrokerCnx -> toBrokerCnx.ctx().writeAndFlush(cmd) + .addListener(writeFuture -> { + if (writeFuture.isSuccess()) { + if (LOG.isDebugEnabled()) { + LOG.debug("{} authentication is refreshed successfully by {}, " + + "auth method: {} ", + toBrokerCnx.ctx().channel(), ctx.channel(), clientAuthMethod); + } + } else { + LOG.error("Failed to forward the auth response " + + "from the proxy to the broker through the proxy client, " + + "proxy: 
{}, proxy client: {}", + ctx.channel(), + toBrokerCnx.ctx().channel(), + writeFuture.cause()); + toBrokerCnx.ctx().channel().pipeline() + .fireExceptionCaught(writeFuture.cause()); + } + })) + .whenComplete((__, ex) -> { + if (ex != null) { + LOG.error("Failed to forward the auth response from the proxy to " + + "the broker through the proxy client, proxy: {}", + ctx().channel(), ex); + } + }); + }); + } } catch (Exception e) { String msg = "Unable to handleAuthResponse"; LOG.warn("[{}] {} ", remoteAddress, msg, e); - ctx.writeAndFlush(Commands.newError(-1, ServerError.AuthenticationError, msg)); - close(); + ctx.writeAndFlush(Commands.newError(-1, ServerError.AuthenticationError, msg)) + .addListener(ChannelFutureListener.CLOSE); } } @@ -409,25 +572,24 @@ protected void handleLookup(CommandLookupTopic lookup) { lookupProxyHandler.handleLookup(lookup); } - private void close() { - state = State.Closed; - ctx.close(); - try { - if (client != null) { - client.close(); - } - } catch (PulsarClientException e) { - LOG.error("Unable to close pulsar client - {}. Error - {}", client, e.getMessage()); - } - } - - ClientConfigurationData createClientConfiguration() throws UnsupportedAuthenticationException { - ClientConfigurationData clientConf = new ClientConfigurationData(); - clientConf.setServiceUrl(service.getServiceUrl()); + ClientConfigurationData createClientConfiguration() { + ClientConfigurationData initialConf = new ClientConfigurationData(); ProxyConfiguration proxyConfig = service.getConfiguration(); + initialConf.setServiceUrl( + proxyConfig.isTlsEnabledWithBroker() ? service.getServiceUrlTls() : service.getServiceUrl()); + + // Apply all arbitrary configuration. This must be called before setting any fields annotated as + // @Secret on the ClientConfigurationData object because of the way they are serialized. + // See https://github.com/apache/pulsar/issues/8509 for more information. 
+ Map overrides = PropertiesUtils + .filterAndMapProperties(proxyConfig.getProperties(), "brokerClient_"); + ClientConfigurationData clientConf = ConfigurationDataUtils + .loadData(overrides, initialConf, ClientConfigurationData.class); + clientConf.setAuthentication(this.getClientAuthentication()); if (proxyConfig.isTlsEnabledWithBroker()) { clientConf.setUseTls(true); + clientConf.setTlsHostnameVerificationEnable(proxyConfig.isTlsHostnameVerificationEnabled()); if (proxyConfig.isBrokerClientTlsEnabledWithKeyStore()) { clientConf.setUseKeyStoreTls(true); clientConf.setTlsTrustStoreType(proxyConfig.getBrokerClientTlsTrustStoreType()); @@ -441,20 +603,12 @@ ClientConfigurationData createClientConfiguration() throws UnsupportedAuthentica return clientConf; } - private PulsarClientImpl createClient(final ClientConfigurationData clientConf, final AuthData clientAuthData, - final String clientAuthMethod, final int protocolVersion) throws PulsarClientException { - this.connectionPool = new ProxyConnectionPool(clientConf, service.getWorkerGroup(), - () -> new ProxyClientCnx(clientConf, service.getWorkerGroup(), clientAuthRole, clientAuthData, - clientAuthMethod, protocolVersion)); - return new PulsarClientImpl(clientConf, service.getWorkerGroup(), connectionPool, service.getTimer()); - } - private static int getProtocolVersionToAdvertise(CommandConnect connect) { return Math.min(connect.getProtocolVersion(), Commands.getCurrentProtocolVersion()); } long newRequestId() { - return client.newRequestId(); + return requestIdGenerator.getAndIncrement(); } public Authentication getClientAuthentication() { @@ -482,6 +636,36 @@ public HAProxyMessage getHAProxyMessage() { return haProxyMessage; } - private static final Logger LOG = LoggerFactory.getLogger(ProxyConnection.class); + private boolean isBrokerActive(String targetBrokerHostPort) { + for (ServiceLookupData serviceLookupData : getAvailableBrokers()) { + if (matchesHostAndPort("pulsar://", 
serviceLookupData.getPulsarServiceUrl(), targetBrokerHostPort) + || matchesHostAndPort("pulsar+ssl://", serviceLookupData.getPulsarServiceUrlTls(), + targetBrokerHostPort)) { + return true; + } + } + return false; + } + + private List getAvailableBrokers() { + if (service.getDiscoveryProvider() == null) { + LOG.warn("Unable to retrieve active brokers. service.getDiscoveryProvider() is null." + + "zookeeperServers and configurationStoreServers must be configured in proxy configuration " + + "when checkActiveBrokers is enabled."); + return Collections.emptyList(); + } + try { + return service.getDiscoveryProvider().getAvailableBrokers(); + } catch (PulsarServerException e) { + LOG.error("Unable to get available brokers", e); + return Collections.emptyList(); + } + } + static boolean matchesHostAndPort(String expectedPrefix, String pulsarServiceUrl, String brokerHostPort) { + return pulsarServiceUrl != null + && pulsarServiceUrl.length() == expectedPrefix.length() + brokerHostPort.length() + && pulsarServiceUrl.startsWith(expectedPrefix) + && pulsarServiceUrl.startsWith(brokerHostPort, expectedPrefix.length()); + } } diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConnectionPool.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConnectionPool.java deleted file mode 100644 index cd1b31d343455..0000000000000 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyConnectionPool.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.pulsar.proxy.server; - -import java.io.IOException; -import java.util.concurrent.ExecutionException; -import java.util.function.Supplier; - -import org.apache.pulsar.client.api.PulsarClientException; -import org.apache.pulsar.client.impl.ClientCnx; -import org.apache.pulsar.client.impl.ConnectionPool; -import org.apache.pulsar.client.impl.conf.ClientConfigurationData; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import io.netty.channel.EventLoopGroup; - -public class ProxyConnectionPool extends ConnectionPool { - public ProxyConnectionPool(ClientConfigurationData clientConfig, EventLoopGroup eventLoopGroup, - Supplier clientCnxSupplier) throws PulsarClientException { - super(clientConfig, eventLoopGroup, clientCnxSupplier); - } - - @Override - public void close() throws IOException { - log.info("Closing ProxyConnectionPool."); - pool.forEach((address, clientCnxPool) -> { - if (clientCnxPool != null) { - clientCnxPool.forEach((identifier, clientCnx) -> { - if (clientCnx != null && clientCnx.isDone()) { - try { - clientCnx.get().close(); - } catch (InterruptedException | ExecutionException e) { - log.error("Unable to close get client connection future.", e); - } - } - }); - } - }); - dnsResolver.close(); - } - - private static final Logger log = LoggerFactory.getLogger(ProxyConnectionPool.class); -} diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyService.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyService.java index af5b2a8ec9409..10e122e794d3c 100644 --- 
a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyService.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyService.java @@ -29,9 +29,9 @@ import io.netty.channel.ChannelOption; import io.netty.channel.EventLoopGroup; import io.netty.channel.socket.SocketChannel; +import io.netty.resolver.dns.DnsAddressResolverGroup; +import io.netty.resolver.dns.DnsNameResolverBuilder; import io.netty.util.concurrent.DefaultThreadFactory; -import io.netty.util.HashedWheelTimer; -import io.netty.util.Timer; import io.prometheus.client.Counter; import io.prometheus.client.Gauge; import java.io.Closeable; @@ -58,6 +58,7 @@ import org.apache.pulsar.client.impl.auth.AuthenticationDisabled; import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; +import org.apache.pulsar.common.util.netty.DnsResolverUtil; import org.apache.pulsar.common.util.netty.EventLoopUtil; import org.apache.pulsar.metadata.api.MetadataStoreException; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; @@ -73,7 +74,10 @@ public class ProxyService implements Closeable { private final ProxyConfiguration proxyConfig; private final Authentication proxyClientAuthentication; - private final Timer timer; + @Getter + private final DnsAddressResolverGroup dnsAddressResolverGroup; + @Getter + private final BrokerProxyValidator brokerProxyValidator; private String serviceUrl; private String serviceUrlTls; private final AuthenticationService authenticationService; @@ -102,8 +106,6 @@ public class ProxyService implements Closeable { private final ScheduledExecutorService statsExecutor; - private static final int numThreads = Runtime.getRuntime().availableProcessors(); - static final Gauge activeConnections = Gauge .build("pulsar_proxy_active_connections", "Number of connections currently active in the proxy").create() .register(); @@ -133,7 +135,6 @@ public ProxyService(ProxyConfiguration 
proxyConfig, AuthenticationService authenticationService) throws Exception { checkNotNull(proxyConfig); this.proxyConfig = proxyConfig; - this.timer = new HashedWheelTimer(new DefaultThreadFactory("pulsar-timer", Thread.currentThread().isDaemon()), 1, TimeUnit.MILLISECONDS); this.clientCnxs = Sets.newConcurrentHashSet(); this.topicStats = Maps.newConcurrentMap(); @@ -145,10 +146,23 @@ public ProxyService(ProxyConfiguration proxyConfig, } else { proxyLogLevel = 0; } - this.acceptorGroup = EventLoopUtil.newEventLoopGroup(1, false, acceptorThreadFactory); - this.workerGroup = EventLoopUtil.newEventLoopGroup(numThreads, false, workersThreadFactory); + this.acceptorGroup = EventLoopUtil.newEventLoopGroup(proxyConfig.getNumAcceptorThreads(), + false, acceptorThreadFactory); + this.workerGroup = EventLoopUtil.newEventLoopGroup(proxyConfig.getNumIOThreads(), + false, workersThreadFactory); this.authenticationService = authenticationService; + DnsNameResolverBuilder dnsNameResolverBuilder = new DnsNameResolverBuilder() + .channelType(EventLoopUtil.getDatagramChannelClass(workerGroup)); + DnsResolverUtil.applyJdkDnsCacheSettings(dnsNameResolverBuilder); + + dnsAddressResolverGroup = new DnsAddressResolverGroup(dnsNameResolverBuilder); + + brokerProxyValidator = new BrokerProxyValidator(dnsAddressResolverGroup.getResolver(workerGroup.next()), + proxyConfig.getBrokerProxyAllowedHostNames(), + proxyConfig.getBrokerProxyAllowedIPAddresses(), + proxyConfig.getBrokerProxyAllowedTargetPorts()); + // Initialize the message protocol handlers proxyExtensions = ProxyExtensions.load(proxyConfig); proxyExtensions.initialize(proxyConfig); @@ -277,6 +291,8 @@ public BrokerDiscoveryProvider getDiscoveryProvider() { } public void close() throws IOException { + dnsAddressResolverGroup.close(); + if (discoveryProvider != null) { discoveryProvider.close(); } @@ -314,9 +330,6 @@ public void close() throws IOException { } acceptorGroup.shutdownGracefully(); workerGroup.shutdownGracefully(); - if 
(timer != null) { - timer.stop(); - } } public String getServiceUrl() { @@ -331,10 +344,6 @@ public ProxyConfiguration getConfiguration() { return proxyConfig; } - public Timer getTimer() { - return timer; - } - public AuthenticationService getAuthenticationService() { return authenticationService; } diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyServiceStarter.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyServiceStarter.java index 235927c98c5e2..e358ffef37c93 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyServiceStarter.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ProxyServiceStarter.java @@ -27,6 +27,7 @@ import static org.slf4j.bridge.SLF4JBridgeHandler.removeHandlersForRootLogger; import com.google.common.annotations.VisibleForTesting; +import lombok.Getter; import org.apache.logging.log4j.core.util.datetime.FixedDateFormat; import org.apache.pulsar.broker.PulsarServerException; import org.apache.pulsar.broker.ServiceConfiguration; @@ -93,6 +94,7 @@ public class ProxyServiceStarter { private ProxyConfiguration config; + @Getter private ProxyService proxyService; private WebServer server; @@ -108,6 +110,7 @@ public ProxyServiceStarter(String[] args) throws Exception { FixedDateFormat.FixedFormat.ISO8601_OFFSET_DATE_TIME_HHMM.getPattern()); Thread.setDefaultUncaughtExceptionHandler((thread, exception) -> { System.out.println(String.format("%s [%s] error Uncaught exception in thread %s: %s", dateFormat.format(new Date()), thread.getContextClassLoader(), thread.getName(), exception.getMessage())); + exception.printStackTrace(System.out); }); JCommander jcommander = new JCommander(); @@ -232,7 +235,6 @@ public static void addWebServerHandlers(WebServer server, AdminProxyHandler adminProxyHandler = new AdminProxyHandler(config, discoveryProvider); ServletHolder servletHolder = new ServletHolder(adminProxyHandler); - servletHolder.setInitParameter("preserveHost", 
"true"); server.addServlet("/admin", servletHolder); server.addServlet("/lookup", servletHolder); @@ -312,6 +314,11 @@ public ProxyConfiguration getConfig() { return config; } + @VisibleForTesting + public WebServer getServer() { + return server; + } + private static final Logger log = LoggerFactory.getLogger(ProxyServiceStarter.class); } diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ServiceChannelInitializer.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ServiceChannelInitializer.java index 658dd8762a7f2..db2574f0df161 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ServiceChannelInitializer.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/ServiceChannelInitializer.java @@ -18,15 +18,13 @@ */ package org.apache.pulsar.proxy.server; -import static org.apache.commons.lang3.StringUtils.isEmpty; import io.netty.handler.ssl.SslHandler; -import java.util.function.Supplier; -import org.apache.pulsar.client.api.AuthenticationDataProvider; -import org.apache.pulsar.client.api.AuthenticationFactory; +import io.netty.handler.ssl.SslProvider; +import io.netty.handler.timeout.ReadTimeoutHandler; +import java.util.concurrent.TimeUnit; import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.protocol.OptionalProxyProtocolDecoder; -import org.apache.pulsar.common.util.NettyClientSslContextRefresher; import org.apache.pulsar.common.util.NettyServerSslContextBuilder; import io.netty.channel.ChannelInitializer; @@ -46,11 +44,10 @@ public class ServiceChannelInitializer extends ChannelInitializer private final ProxyService proxyService; private final boolean enableTls; private final boolean tlsEnabledWithKeyStore; + private final int brokerProxyReadTimeoutMs; private SslContextAutoRefreshBuilder serverSslCtxRefresher; - private SslContextAutoRefreshBuilder clientSslCtxRefresher; private NettySSLContextAutoRefreshBuilder serverSSLContextAutoRefreshBuilder; - private 
NettySSLContextAutoRefreshBuilder clientSSLContextAutoRefreshBuilder; public ServiceChannelInitializer(ProxyService proxyService, ProxyConfiguration serviceConfig, boolean enableTls) throws Exception { @@ -58,6 +55,7 @@ public ServiceChannelInitializer(ProxyService proxyService, ProxyConfiguration s this.proxyService = proxyService; this.enableTls = enableTls; this.tlsEnabledWithKeyStore = serviceConfig.isTlsEnabledWithKeyStore(); + this.brokerProxyReadTimeoutMs = serviceConfig.getBrokerProxyReadTimeoutMs(); if (enableTls) { if (tlsEnabledWithKeyStore) { @@ -75,7 +73,13 @@ public ServiceChannelInitializer(ProxyService proxyService, ProxyConfiguration s serviceConfig.getTlsProtocols(), serviceConfig.getTlsCertRefreshCheckDurationSec()); } else { - serverSslCtxRefresher = new NettyServerSslContextBuilder(serviceConfig.isTlsAllowInsecureConnection(), + SslProvider sslProvider = null; + if (serviceConfig.getTlsProvider() != null) { + sslProvider = SslProvider.valueOf(serviceConfig.getTlsProvider()); + } + serverSslCtxRefresher = new NettyServerSslContextBuilder( + sslProvider, + serviceConfig.isTlsAllowInsecureConnection(), serviceConfig.getTlsTrustCertsFilePath(), serviceConfig.getTlsCertificateFilePath(), serviceConfig.getTlsKeyFilePath(), serviceConfig.getTlsCiphers(), serviceConfig.getTlsProtocols(), serviceConfig.isTlsRequireTrustedClientCertOnConnect(), @@ -84,36 +88,6 @@ public ServiceChannelInitializer(ProxyService proxyService, ProxyConfiguration s } else { this.serverSslCtxRefresher = null; } - - if (serviceConfig.isTlsEnabledWithBroker()) { - AuthenticationDataProvider authData = null; - - if (!isEmpty(serviceConfig.getBrokerClientAuthenticationPlugin())) { - authData = AuthenticationFactory.create(serviceConfig.getBrokerClientAuthenticationPlugin(), - serviceConfig.getBrokerClientAuthenticationParameters()).getAuthData(); - } - - if (tlsEnabledWithKeyStore) { - clientSSLContextAutoRefreshBuilder = new NettySSLContextAutoRefreshBuilder( - 
serviceConfig.getBrokerClientSslProvider(), - serviceConfig.isTlsAllowInsecureConnection(), - serviceConfig.getBrokerClientTlsTrustStoreType(), - serviceConfig.getBrokerClientTlsTrustStore(), - serviceConfig.getBrokerClientTlsTrustStorePassword(), - serviceConfig.getBrokerClientTlsCiphers(), - serviceConfig.getBrokerClientTlsProtocols(), - serviceConfig.getTlsCertRefreshCheckDurationSec(), - authData); - } else { - clientSslCtxRefresher = new NettyClientSslContextRefresher( - serviceConfig.isTlsAllowInsecureConnection(), - serviceConfig.getBrokerClientTrustCertsFilePath(), - authData, - serviceConfig.getTlsCertRefreshCheckDurationSec()); - } - } else { - this.clientSslCtxRefresher = null; - } } @Override @@ -127,31 +101,16 @@ protected void initChannel(SocketChannel ch) throws Exception { ch.pipeline().addLast(TLS_HANDLER, new SslHandler(serverSSLContextAutoRefreshBuilder.get().createSSLEngine())); } + if (brokerProxyReadTimeoutMs > 0) { + ch.pipeline().addLast("readTimeoutHandler", + new ReadTimeoutHandler(brokerProxyReadTimeoutMs, TimeUnit.MILLISECONDS)); + } if (proxyService.getConfiguration().isHaProxyProtocolEnabled()) { ch.pipeline().addLast(OptionalProxyProtocolDecoder.NAME, new OptionalProxyProtocolDecoder()); } ch.pipeline().addLast("frameDecoder", new LengthFieldBasedFrameDecoder( Commands.DEFAULT_MAX_MESSAGE_SIZE + Commands.MESSAGE_SIZE_FRAME_PADDING, 0, 4, 0, 4)); - Supplier sslHandlerSupplier = null; - if (clientSslCtxRefresher != null) { - sslHandlerSupplier = new Supplier() { - @Override - public SslHandler get() { - return clientSslCtxRefresher.get().newHandler(ch.alloc()); - } - }; - } else if (clientSSLContextAutoRefreshBuilder != null) { - sslHandlerSupplier = new Supplier() { - @Override - public SslHandler get() { - return new SslHandler(clientSSLContextAutoRefreshBuilder.get().createSSLEngine()); - } - }; - } - - ch.pipeline().addLast("handler", - new ProxyConnection(proxyService, sslHandlerSupplier)); - + ch.pipeline().addLast("handler", new 
ProxyConnection(proxyService, proxyService.getDnsAddressResolverGroup())); } } diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/BacklogQuotaMixIn.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/TargetAddressDeniedException.java similarity index 80% rename from pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/BacklogQuotaMixIn.java rename to pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/TargetAddressDeniedException.java index a1562400f0818..e62525fbca175 100644 --- a/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/BacklogQuotaMixIn.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/TargetAddressDeniedException.java @@ -16,11 +16,11 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.pulsar.common.policies.data; -import com.fasterxml.jackson.annotation.JsonAlias; +package org.apache.pulsar.proxy.server; -public abstract class BacklogQuotaMixIn { - @JsonAlias("limit") - private long limitSize; +class TargetAddressDeniedException extends RuntimeException { + public TargetAddressDeniedException(String message) { + super(message); + } } diff --git a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/WebServer.java b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/WebServer.java index 8a9956ca493fb..b8004f916fd8c 100644 --- a/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/WebServer.java +++ b/pulsar-proxy/src/main/java/org/apache/pulsar/proxy/server/WebServer.java @@ -37,8 +37,7 @@ import org.apache.pulsar.broker.web.RateLimitingFilter; import org.apache.pulsar.broker.web.JettyRequestLogFactory; import org.apache.pulsar.broker.web.WebExecutorThreadPool; -import org.apache.pulsar.common.util.SecurityUtility; -import org.apache.pulsar.common.util.keystoretls.KeyStoreSSLContext; +import org.apache.pulsar.jetty.tls.JettySslContextFactory; import org.eclipse.jetty.server.Connector; 
import org.eclipse.jetty.server.Handler; import org.eclipse.jetty.server.HttpConfiguration; @@ -100,8 +99,8 @@ public WebServer(ProxyConfiguration config, AuthenticationService authentication try { SslContextFactory sslCtxFactory; if (config.isTlsEnabledWithKeyStore()) { - sslCtxFactory = KeyStoreSSLContext.createSslContextFactory( - config.getTlsProvider(), + sslCtxFactory = JettySslContextFactory.createServerSslContextWithKeystore( + config.getWebServiceTlsProvider(), config.getTlsKeyStoreType(), config.getTlsKeyStore(), config.getTlsKeyStorePassword(), @@ -110,16 +109,20 @@ public WebServer(ProxyConfiguration config, AuthenticationService authentication config.getTlsTrustStore(), config.getTlsTrustStorePassword(), config.isTlsRequireTrustedClientCertOnConnect(), + config.getWebServiceTlsCiphers(), + config.getWebServiceTlsProtocols(), config.getTlsCertRefreshCheckDurationSec() ); } else { - sslCtxFactory = SecurityUtility.createSslContextFactory( + sslCtxFactory = JettySslContextFactory.createServerSslContext( + config.getWebServiceTlsProvider(), config.isTlsAllowInsecureConnection(), config.getTlsTrustCertsFilePath(), config.getTlsCertificateFilePath(), config.getTlsKeyFilePath(), config.isTlsRequireTrustedClientCertOnConnect(), - true, + config.getWebServiceTlsCiphers(), + config.getWebServiceTlsProtocols(), config.getTlsCertRefreshCheckDurationSec()); } connectorTls = new ServerConnector(server, 1, 1, sslCtxFactory); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/extensions/ProxyExtensionWithClassLoaderTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/extensions/ProxyExtensionWithClassLoaderTest.java index b43eb22ab8952..57882a6a4b33b 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/extensions/ProxyExtensionWithClassLoaderTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/extensions/ProxyExtensionWithClassLoaderTest.java @@ -70,6 +70,7 @@ public void testWrapper() throws Exception { verify(h, 
times(1)).start(service); } + @Test public void testClassLoaderSwitcher() throws Exception { NarClassLoader loader = mock(NarClassLoader.class); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AuthedAdminProxyHandlerTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AuthedAdminProxyHandlerTest.java index 545912dcdc030..8c7d4fa9198ea 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AuthedAdminProxyHandlerTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/AuthedAdminProxyHandlerTest.java @@ -83,6 +83,7 @@ protected void setup() throws Exception { proxyConfig.setAuthenticationEnabled(true); proxyConfig.setAuthorizationEnabled(true); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setServicePortTls(Optional.of(0)); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setWebServicePortTls(Optional.of(0)); @@ -109,7 +110,6 @@ protected void setup() throws Exception { doReturn(report).when(discoveryProvider).nextBroker(); ServletHolder servletHolder = new ServletHolder(new AdminProxyHandler(proxyConfig, discoveryProvider)); - servletHolder.setInitParameter("preserveHost", "true"); webServer.addServlet("/admin", servletHolder); webServer.addServlet("/lookup", servletHolder); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/BrokerProxyValidatorTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/BrokerProxyValidatorTest.java new file mode 100644 index 0000000000000..fba3c36e26616 --- /dev/null +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/BrokerProxyValidatorTest.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.pulsar.proxy.server; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; +import io.netty.resolver.AddressResolver; +import io.netty.util.concurrent.EventExecutor; +import io.netty.util.concurrent.SucceededFuture; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.util.concurrent.ExecutionException; +import org.apache.curator.shaded.com.google.common.net.InetAddresses; +import org.testng.annotations.Test; + +public class BrokerProxyValidatorTest { + + @Test + public void shouldAllowValidInput() throws Exception { + BrokerProxyValidator brokerProxyValidator = new BrokerProxyValidator( + createMockedAddressResolver("1.2.3.4"), + "myhost" + , "1.2.0.0/16" + , "6650"); + InetSocketAddress inetSocketAddress = brokerProxyValidator.resolveAndCheckTargetAddress("myhost:6650").get(); + assertNotNull(inetSocketAddress); + assertEquals(inetSocketAddress.getAddress().getHostAddress(), "1.2.3.4"); + assertEquals(inetSocketAddress.getPort(), 6650); + } + + @Test(expectedExceptions = ExecutionException.class, + expectedExceptionsMessageRegExp = ".*Given host in 'myhost:6650' isn't allowed.") + public void shouldPreventInvalidHostName() throws Exception { + 
BrokerProxyValidator brokerProxyValidator = new BrokerProxyValidator( + createMockedAddressResolver("1.2.3.4"), + "allowedhost" + , "1.2.0.0/16" + , "6650"); + brokerProxyValidator.resolveAndCheckTargetAddress("myhost:6650").get(); + } + + @Test(expectedExceptions = ExecutionException.class, + expectedExceptionsMessageRegExp = ".* The IP address of the given host and port 'myhost:6650' isn't allowed.") + public void shouldPreventInvalidIPAddress() throws Exception { + BrokerProxyValidator brokerProxyValidator = new BrokerProxyValidator( + createMockedAddressResolver("1.2.3.4"), + "myhost" + , "1.3.0.0/16" + , "6650"); + brokerProxyValidator.resolveAndCheckTargetAddress("myhost:6650").get(); + } + + @Test + public void shouldSupportHostNamePattern() throws Exception { + BrokerProxyValidator brokerProxyValidator = new BrokerProxyValidator( + createMockedAddressResolver("1.2.3.4"), + "*.mydomain" + , "1.2.0.0/16" + , "6650"); + brokerProxyValidator.resolveAndCheckTargetAddress("myhost.mydomain:6650").get(); + } + + @Test + public void shouldAllowAllWithWildcard() throws Exception { + BrokerProxyValidator brokerProxyValidator = new BrokerProxyValidator( + createMockedAddressResolver("1.2.3.4"), + "*" + , "*" + , "6650"); + brokerProxyValidator.resolveAndCheckTargetAddress("myhost.mydomain:6650").get(); + } + + @Test + public void shouldAllowIPv6Address() throws Exception { + BrokerProxyValidator brokerProxyValidator = new BrokerProxyValidator( + createMockedAddressResolver("fd4d:801b:73fa:abcd:0000:0000:0000:0001"), + "*" + , "fd4d:801b:73fa:abcd::/64" + , "6650"); + brokerProxyValidator.resolveAndCheckTargetAddress("myhost.mydomain:6650").get(); + } + + @Test + public void shouldAllowIPv6AddressNumeric() throws Exception { + BrokerProxyValidator brokerProxyValidator = new BrokerProxyValidator( + createMockedAddressResolver("fd4d:801b:73fa:abcd:0000:0000:0000:0001"), + "*" + , "fd4d:801b:73fa:abcd::/64" + , "6650"); + 
brokerProxyValidator.resolveAndCheckTargetAddress("fd4d:801b:73fa:abcd:0000:0000:0000:0001:6650").get(); + } + + private AddressResolver createMockedAddressResolver(String ipAddressResult) { + AddressResolver inetSocketAddressResolver = mock(AddressResolver.class); + when(inetSocketAddressResolver.resolve(any())).then(invocationOnMock -> { + InetSocketAddress address = (InetSocketAddress) invocationOnMock.getArgument(0); + return new SucceededFuture(mock(EventExecutor.class), + new InetSocketAddress(InetAddresses.forString(ipAddressResult), address.getPort())); + }); + return inetSocketAddressResolver; + } +} diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAdditionalServletTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAdditionalServletTest.java index a909a9ff3b818..94009c84f0e64 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAdditionalServletTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAdditionalServletTest.java @@ -72,6 +72,7 @@ protected void setup() throws Exception { internalSetup(); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setZookeeperServers(DUMMY_VALUE); proxyConfig.setConfigurationStoreServers(GLOBAL_DUMMY_VALUE); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticatedProducerConsumerTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticatedProducerConsumerTest.java index e63d3aeb4cb96..b37dedfe6a93a 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticatedProducerConsumerTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticatedProducerConsumerTest.java @@ -106,6 +106,7 @@ protected void setup() throws Exception { proxyConfig.setAuthenticationEnabled(true); 
proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setServicePortTls(Optional.of(0)); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setWebServicePortTls(Optional.of(0)); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticationTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticationTest.java index f6d53c8ec5357..9ebe0ab65c820 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticationTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyAuthenticationTest.java @@ -215,6 +215,7 @@ void testAuthentication() throws Exception { ProxyConfiguration proxyConfig = new ProxyConfiguration(); proxyConfig.setAuthenticationEnabled(true); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setBrokerServiceURL(pulsar.getBrokerServiceUrl()); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConnectionTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConnectionTest.java new file mode 100644 index 0000000000000..8c07e4b42d797 --- /dev/null +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConnectionTest.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.proxy.server; + +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertTrue; +import org.apache.pulsar.client.impl.conf.ClientConfigurationData; +import org.testng.annotations.Test; + +public class ProxyConnectionTest { + + @Test + public void testMatchesHostAndPort() { + assertTrue(ProxyConnection + .matchesHostAndPort("pulsar://", "pulsar://1.2.3.4:6650", "1.2.3.4:6650")); + assertTrue(ProxyConnection + .matchesHostAndPort("pulsar+ssl://", "pulsar+ssl://1.2.3.4:6650", "1.2.3.4:6650")); + assertFalse(ProxyConnection + .matchesHostAndPort("pulsar://", "pulsar://1.2.3.4:12345", "5.6.7.8:1234")); + assertFalse(ProxyConnection + .matchesHostAndPort("pulsar://", "pulsar://1.2.3.4:12345", "1.2.3.4:1234")); + } + @Test + public void testCreateClientConfiguration() { + ProxyConfiguration proxyConfiguration = new ProxyConfiguration(); + proxyConfiguration.setTlsEnabledWithBroker(true); + String proxyUrlTls = "pulsar+ssl://proxy:6651"; + String proxyUrl = "pulsar://proxy:6650"; + + ProxyService proxyService = mock(ProxyService.class); + doReturn(proxyConfiguration).when(proxyService).getConfiguration(); + doReturn(proxyUrlTls).when(proxyService).getServiceUrlTls(); + doReturn(proxyUrl).when(proxyService).getServiceUrl(); + + ProxyConnection proxyConnection = new ProxyConnection(proxyService, null); + ClientConfigurationData clientConfiguration = 
proxyConnection.createClientConfiguration(); + assertEquals(clientConfiguration.getServiceUrl(), proxyUrlTls); + + proxyConfiguration.setTlsEnabledWithBroker(false); + clientConfiguration = proxyConnection.createClientConfiguration(); + assertEquals(clientConfiguration.getServiceUrl(), proxyUrl); + } +} diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConnectionThrottlingTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConnectionThrottlingTest.java index 062db184e0682..128d33fbf19db 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConnectionThrottlingTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyConnectionThrottlingTest.java @@ -53,6 +53,7 @@ protected void setup() throws Exception { internalSetup(); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setZookeeperServers(DUMMY_VALUE); proxyConfig.setConfigurationStoreServers(GLOBAL_DUMMY_VALUE); proxyConfig.setMaxConcurrentLookupRequests(NUM_CONCURRENT_LOOKUP); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyEnableHAProxyProtocolTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyEnableHAProxyProtocolTest.java index 44403fbb39b16..496b3ca5c4d4a 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyEnableHAProxyProtocolTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyEnableHAProxyProtocolTest.java @@ -56,6 +56,7 @@ protected void setup() throws Exception { internalSetup(); proxyConfig.setServicePort(Optional.ofNullable(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setZookeeperServers(DUMMY_VALUE); proxyConfig.setConfigurationStoreServers(GLOBAL_DUMMY_VALUE); proxyConfig.setHaProxyProtocolEnabled(true); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyForwardAuthDataTest.java 
b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyForwardAuthDataTest.java index cf61dac0e6b68..aa8475565155b 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyForwardAuthDataTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyForwardAuthDataTest.java @@ -104,6 +104,7 @@ public void testForwardAuthData() throws Exception { proxyConfig.setAuthenticationEnabled(true); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setBrokerServiceURL(pulsar.getBrokerServiceUrl()); proxyConfig.setBrokerClientAuthenticationPlugin(BasicAuthentication.class.getName()); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyKeyStoreTlsTestWithAuth.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyKeyStoreTlsTestWithAuth.java index af76bfaeb2bb4..f1cb69f782da2 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyKeyStoreTlsTestWithAuth.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyKeyStoreTlsTestWithAuth.java @@ -78,6 +78,7 @@ protected void setup() throws Exception { internalSetup(); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setServicePortTls(Optional.of(0)); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setWebServicePortTls(Optional.of(0)); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyKeyStoreTlsTestWithoutAuth.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyKeyStoreTlsTestWithoutAuth.java index 9b0e9b427e56c..03d0b2b2a8fcc 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyKeyStoreTlsTestWithoutAuth.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyKeyStoreTlsTestWithoutAuth.java @@ -73,6 +73,7 @@ protected void setup() throws Exception { 
internalSetup(); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setServicePortTls(Optional.of(0)); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setWebServicePortTls(Optional.of(0)); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyLookupThrottlingTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyLookupThrottlingTest.java index fa3c485335581..51450264c8d15 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyLookupThrottlingTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyLookupThrottlingTest.java @@ -52,6 +52,7 @@ protected void setup() throws Exception { internalSetup(); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setZookeeperServers(DUMMY_VALUE); proxyConfig.setConfigurationStoreServers(GLOBAL_DUMMY_VALUE); proxyConfig.setMaxConcurrentLookupRequests(NUM_CONCURRENT_LOOKUP); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyParserTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyParserTest.java index 905ca2066c738..654686dedf2f4 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyParserTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyParserTest.java @@ -71,6 +71,7 @@ protected void setup() throws Exception { internalSetup(); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setZookeeperServers(DUMMY_VALUE); proxyConfig.setConfigurationStoreServers(GLOBAL_DUMMY_VALUE); //enable full parsing feature diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java new file mode 100644 index 0000000000000..907865d76c579 --- /dev/null +++ 
b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRefreshAuthTest.java @@ -0,0 +1,186 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.proxy.server; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.mockito.Mockito.spy; +import static org.testng.Assert.assertTrue; +import com.google.common.collect.Sets; +import io.jsonwebtoken.SignatureAlgorithm; +import java.util.Calendar; +import java.util.Collections; +import java.util.HashSet; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import javax.crypto.SecretKey; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; +import org.apache.pulsar.broker.authentication.AuthenticationService; +import org.apache.pulsar.broker.authentication.utils.AuthTokenUtils; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerConsumerBase; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.impl.ClientCnx; +import 
org.apache.pulsar.client.impl.PulsarClientImpl; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; +import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.awaitility.Awaitility; +import org.mockito.Mockito; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +@Slf4j +public class ProxyRefreshAuthTest extends ProducerConsumerBase { + private final SecretKey SECRET_KEY = AuthTokenUtils.createSecretKey(SignatureAlgorithm.HS256); + + private ProxyService proxyService; + private final ProxyConfiguration proxyConfig = new ProxyConfiguration(); + + @Override + protected void doInitConf() throws Exception { + super.doInitConf(); + + // enable tls and auth&auth at broker + conf.setAuthenticationEnabled(true); + conf.setAuthorizationEnabled(false); + conf.setTopicLevelPoliciesEnabled(false); + conf.setProxyRoles(Collections.singleton("Proxy")); + conf.setAdvertisedAddress(null); + conf.setAuthenticateOriginalAuthData(true); + conf.setBrokerServicePort(Optional.of(0)); + conf.setWebServicePort(Optional.of(0)); + + Set superUserRoles = new HashSet<>(); + superUserRoles.add("superUser"); + conf.setSuperUserRoles(superUserRoles); + + conf.setAuthenticationProviders(Collections.singleton(AuthenticationProviderToken.class.getName())); + Properties properties = new Properties(); + properties.setProperty("tokenSecretKey", AuthTokenUtils.encodeKeyBase64(SECRET_KEY)); + conf.setProperties(properties); + + conf.setClusterName("proxy-authorization"); + conf.setNumExecutorThreadPoolSize(5); + + conf.setAuthenticationRefreshCheckSeconds(1); + } + + @BeforeClass + @Override + protected void setup() throws Exception { + super.init(); + + admin = 
PulsarAdmin.builder().serviceHttpUrl(pulsar.getWebServiceAddress()) + .authentication(new AuthenticationToken( + () -> AuthTokenUtils.createToken(SECRET_KEY, "client", Optional.empty()))).build(); + String namespaceName = "my-tenant/my-ns"; + admin.clusters().createCluster("proxy-authorization", + ClusterData.builder().serviceUrlTls(brokerUrlTls.toString()).build()); + admin.tenants().createTenant("my-tenant", + new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("proxy-authorization"))); + admin.namespaces().createNamespace(namespaceName); + + // start proxy service + proxyConfig.setAuthenticationEnabled(true); + proxyConfig.setAuthorizationEnabled(false); + proxyConfig.setForwardAuthorizationCredentials(true); + proxyConfig.setBrokerServiceURL(pulsar.getBrokerServiceUrl()); + proxyConfig.setAdvertisedAddress(null); + + proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); + proxyConfig.setWebServicePort(Optional.of(0)); + + proxyConfig.setBrokerClientAuthenticationPlugin(AuthenticationToken.class.getName()); + proxyConfig.setBrokerClientAuthenticationParameters( + AuthTokenUtils.createToken(SECRET_KEY, "Proxy", Optional.empty())); + proxyConfig.setAuthenticationProviders(Collections.singleton(AuthenticationProviderToken.class.getName())); + Properties properties = new Properties(); + properties.setProperty("tokenSecretKey", AuthTokenUtils.encodeKeyBase64(SECRET_KEY)); + proxyConfig.setProperties(properties); + + proxyService = Mockito.spy(new ProxyService(proxyConfig, + new AuthenticationService( + PulsarConfigurationLoader.convertFrom(proxyConfig)))); + } + + @AfterClass(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + proxyService.close(); + } + + private void startProxy(boolean forwardAuthData) throws Exception { + pulsar.getConfiguration().setAuthenticateOriginalAuthData(forwardAuthData); + 
proxyConfig.setForwardAuthorizationCredentials(forwardAuthData); + proxyService.start(); + } + + @DataProvider + Object[] forwardAuthDataProvider() { + return new Object[]{true, false}; + } + + @Test(dataProvider = "forwardAuthDataProvider") + public void testAuthDataRefresh(boolean forwardAuthData) throws Exception { + log.info("-- Starting {} test --", methodName); + + startProxy(forwardAuthData); + + AuthenticationToken authenticationToken = new AuthenticationToken(() -> { + Calendar calendar = Calendar.getInstance(); + calendar.add(Calendar.SECOND, 1); + return AuthTokenUtils.createToken(SECRET_KEY, "client", Optional.of(calendar.getTime())); + }); + + pulsarClient = PulsarClient.builder().serviceUrl(proxyService.getServiceUrl()) + .authentication(authenticationToken) + .build(); + + String topic = "persistent://my-tenant/my-ns/my-topic1"; + @Cleanup + Producer ignored = spy(pulsarClient.newProducer() + .topic(topic).create()); + + PulsarClientImpl pulsarClientImpl = (PulsarClientImpl) pulsarClient; + Set> connections = pulsarClientImpl.getCnxPool().getConnections(); + + Awaitility.await().during(4, SECONDS).untilAsserted(() -> { + pulsarClient.getPartitionsForTopic(topic).get(); + assertTrue(connections.stream().allMatch(n -> { + try { + ClientCnx clientCnx = n.get(); + long timestamp = clientCnx.getLastDisconnectedTimestamp(); + return timestamp == 0; + } catch (Exception e) { + throw new RuntimeException(e); + } + })); + }); + } +} diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRolesEnforcementTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRolesEnforcementTest.java index 9ae3fbc09f3ff..39446af99a577 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRolesEnforcementTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyRolesEnforcementTest.java @@ -209,6 +209,7 @@ public void testIncorrectRoles() throws Exception { 
proxyConfig.setAuthenticationEnabled(true); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setBrokerServiceURL(pulsar.getBrokerServiceUrl()); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceStarterTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceStarterTest.java index 3377ec266a713..62b65d32e8c2b 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceStarterTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceStarterTest.java @@ -51,6 +51,7 @@ public class ProxyServiceStarterTest extends MockedPulsarServiceBaseTest { static final String[] ARGS = new String[]{"-c", "./src/test/resources/proxy.conf"}; private ProxyServiceStarter serviceStarter; + private String serviceUrl; @Override @BeforeClass @@ -59,9 +60,12 @@ protected void setup() throws Exception { serviceStarter = new ProxyServiceStarter(ARGS); serviceStarter.getConfig().setBrokerServiceURL(pulsar.getBrokerServiceUrl()); serviceStarter.getConfig().setBrokerWebServiceURL(pulsar.getWebServiceAddress()); - serviceStarter.getConfig().setServicePort(Optional.of(11000)); + serviceStarter.getConfig().setWebServicePort(Optional.of(0)); + serviceStarter.getConfig().setServicePort(Optional.of(0)); serviceStarter.getConfig().setWebSocketServiceEnabled(true); + serviceStarter.getConfig().setBrokerProxyAllowedTargetPorts("*"); serviceStarter.start(); + serviceUrl = serviceStarter.getProxyService().getServiceUrl(); } @Override @@ -71,14 +75,19 @@ protected void cleanup() throws Exception { serviceStarter.close(); } + private String computeWsBasePath() { + return String.format("ws://localhost:%d/ws", serviceStarter.getServer().getListenPortHTTP().get()); + } + @Test public void testEnableWebSocketServer() throws Exception { HttpClient httpClient = new HttpClient(); WebSocketClient 
webSocketClient = new WebSocketClient(httpClient); webSocketClient.start(); MyWebSocket myWebSocket = new MyWebSocket(); - String webSocketUri = "ws://localhost:8080/ws/pingpong"; + String webSocketUri = computeWsBasePath() + "/pingpong"; Future sessionFuture = webSocketClient.connect(myWebSocket, URI.create(webSocketUri)); + System.out.println("uri" + webSocketUri); sessionFuture.get().getRemote().sendPing(ByteBuffer.wrap("ping".getBytes())); assertTrue(myWebSocket.getResponse().contains("ping")); } @@ -86,7 +95,7 @@ public void testEnableWebSocketServer() throws Exception { @Test public void testProducer() throws Exception { @Cleanup - PulsarClient client = PulsarClient.builder().serviceUrl("pulsar://localhost:11000") + PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl) .build(); @Cleanup @@ -105,7 +114,7 @@ public void testProduceAndConsumeMessageWithWebsocket() throws Exception { WebSocketClient producerWebSocketClient = new WebSocketClient(producerClient); producerWebSocketClient.start(); MyWebSocket producerSocket = new MyWebSocket(); - String produceUri = "ws://localhost:8080/ws/producer/persistent/sample/test/local/websocket-topic"; + String produceUri = computeWsBasePath() + "/producer/persistent/sample/test/local/websocket-topic"; Future producerSession = producerWebSocketClient.connect(producerSocket, URI.create(produceUri)); ProducerMessage produceRequest = new ProducerMessage(); @@ -116,7 +125,7 @@ public void testProduceAndConsumeMessageWithWebsocket() throws Exception { WebSocketClient consumerWebSocketClient = new WebSocketClient(consumerClient); consumerWebSocketClient.start(); MyWebSocket consumerSocket = new MyWebSocket(); - String consumeUri = "ws://localhost:8080/ws/consumer/persistent/sample/test/local/websocket-topic/my-sub"; + String consumeUri = computeWsBasePath() + "/consumer/persistent/sample/test/local/websocket-topic/my-sub"; Future consumerSession = consumerWebSocketClient.connect(consumerSocket, 
URI.create(consumeUri)); consumerSession.get().getRemote().sendPing(ByteBuffer.wrap("ping".getBytes())); producerSession.get().getRemote().sendString(ObjectMapperFactory.getThreadLocal().writeValueAsString(produceRequest)); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceTlsStarterTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceTlsStarterTest.java index 7e6c0f5f25f6c..742cfbb6581ee 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceTlsStarterTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyServiceTlsStarterTest.java @@ -52,6 +52,8 @@ public class ProxyServiceTlsStarterTest extends MockedPulsarServiceBaseTest { private final String TLS_PROXY_CERT_FILE_PATH = "./src/test/resources/authentication/tls/server-cert.pem"; private final String TLS_PROXY_KEY_FILE_PATH = "./src/test/resources/authentication/tls/server-key.pem"; private ProxyServiceStarter serviceStarter; + private String serviceUrl; + private int webPort; @Override @BeforeClass @@ -62,12 +64,17 @@ protected void setup() throws Exception { serviceStarter.getConfig().setBrokerServiceURLTLS(pulsar.getBrokerServiceUrlTls()); serviceStarter.getConfig().setBrokerWebServiceURL(pulsar.getWebServiceAddress()); serviceStarter.getConfig().setBrokerClientTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH); - serviceStarter.getConfig().setServicePortTls(Optional.of(11043)); + serviceStarter.getConfig().setServicePort(Optional.empty()); + serviceStarter.getConfig().setServicePortTls(Optional.of(0)); + serviceStarter.getConfig().setWebServicePort(Optional.of(0)); serviceStarter.getConfig().setTlsEnabledWithBroker(true); serviceStarter.getConfig().setWebSocketServiceEnabled(true); serviceStarter.getConfig().setTlsCertificateFilePath(TLS_PROXY_CERT_FILE_PATH); serviceStarter.getConfig().setTlsKeyFilePath(TLS_PROXY_KEY_FILE_PATH); + serviceStarter.getConfig().setBrokerProxyAllowedTargetPorts("*"); 
serviceStarter.start(); + serviceUrl = serviceStarter.getProxyService().getServiceUrlTls(); + webPort = serviceStarter.getServer().getListenPortHTTP().get(); } protected void doInitConf() throws Exception { @@ -86,7 +93,7 @@ protected void cleanup() throws Exception { @Test public void testProducer() throws Exception { @Cleanup - PulsarClient client = PulsarClient.builder().serviceUrl("pulsar+ssl://localhost:11043") + PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl) .allowTlsInsecureConnection(false).tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH) .build(); @@ -106,7 +113,7 @@ public void testProduceAndConsumeMessageWithWebsocket() throws Exception { WebSocketClient producerWebSocketClient = new WebSocketClient(producerClient); producerWebSocketClient.start(); MyWebSocket producerSocket = new MyWebSocket(); - String produceUri = "ws://localhost:8080/ws/producer/persistent/sample/test/local/websocket-topic"; + String produceUri = "ws://localhost:" + webPort + "/ws/producer/persistent/sample/test/local/websocket-topic"; Future producerSession = producerWebSocketClient.connect(producerSocket, URI.create(produceUri)); ProducerMessage produceRequest = new ProducerMessage(); @@ -117,7 +124,7 @@ public void testProduceAndConsumeMessageWithWebsocket() throws Exception { WebSocketClient consumerWebSocketClient = new WebSocketClient(consumerClient); consumerWebSocketClient.start(); MyWebSocket consumerSocket = new MyWebSocket(); - String consumeUri = "ws://localhost:8080/ws/consumer/persistent/sample/test/local/websocket-topic/my-sub"; + String consumeUri = "ws://localhost:" + webPort + "/ws/consumer/persistent/sample/test/local/websocket-topic/my-sub"; Future consumerSession = consumerWebSocketClient.connect(consumerSocket, URI.create(consumeUri)); consumerSession.get().getRemote().sendPing(ByteBuffer.wrap("ping".getBytes())); producerSession.get().getRemote().sendString(ObjectMapperFactory.getThreadLocal().writeValueAsString(produceRequest)); diff --git 
a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyStatsTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyStatsTest.java index 2b1c22c22d012..1859c243436f4 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyStatsTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyStatsTest.java @@ -67,6 +67,7 @@ protected void setup() throws Exception { internalSetup(); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setZookeeperServers(DUMMY_VALUE); proxyConfig.setConfigurationStoreServers(GLOBAL_DUMMY_VALUE); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTest.java index 92f6a63d0f185..a90243fe019c7 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTest.java @@ -90,6 +90,7 @@ protected void setup() throws Exception { internalSetup(); proxyConfig.setServicePort(Optional.ofNullable(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setZookeeperServers(DUMMY_VALUE); proxyConfig.setConfigurationStoreServers(GLOBAL_DUMMY_VALUE); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTest.java index 59beb94712c7d..5081d0e3bb596 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTest.java @@ -56,6 +56,7 @@ protected void setup() throws Exception { internalSetup(); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setServicePortTls(Optional.of(0)); proxyConfig.setWebServicePort(Optional.of(0)); 
proxyConfig.setWebServicePortTls(Optional.of(0)); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTestWithAuth.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTestWithAuth.java index 0d3d3a041f75c..ece35cf7b22f6 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTestWithAuth.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyTlsTestWithAuth.java @@ -58,6 +58,7 @@ protected void setup() throws Exception { writer.close(); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setServicePortTls(Optional.of(0)); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setWebServicePortTls(Optional.of(0)); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationNegTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationNegTest.java index 5d05867d4fffd..b9d9b04ae3d74 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationNegTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationNegTest.java @@ -113,6 +113,7 @@ protected void setup() throws Exception { proxyConfig.setBrokerServiceURLTLS(pulsar.getBrokerServiceUrlTls()); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setServicePortTls(Optional.of(0)); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setWebServicePortTls(Optional.of(0)); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationTest.java index 14c72881b2994..dd06f33b79a86 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationTest.java +++ 
b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithAuthorizationTest.java @@ -19,15 +19,13 @@ package org.apache.pulsar.proxy.server; import static org.mockito.Mockito.spy; - import com.google.common.collect.Sets; - +import java.util.Collections; import java.util.HashSet; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; - import lombok.Cleanup; import org.apache.pulsar.broker.authentication.AuthenticationProviderTls; import org.apache.pulsar.broker.authentication.AuthenticationService; @@ -145,20 +143,24 @@ public Object[][] protocolsCiphersProviderCodecProvider() { }; } - @BeforeMethod @Override - protected void setup() throws Exception { - + protected void doInitConf() throws Exception { + super.doInitConf(); // enable tls and auth&auth at broker conf.setAuthenticationEnabled(true); conf.setAuthorizationEnabled(true); + conf.setTopicLevelPoliciesEnabled(false); + conf.setProxyRoles(Collections.singleton("Proxy")); + conf.setAdvertisedAddress(null); conf.setBrokerServicePortTls(Optional.of(0)); + conf.setBrokerServicePort(Optional.empty()); conf.setWebServicePortTls(Optional.of(0)); + conf.setWebServicePort(Optional.empty()); conf.setTlsTrustCertsFilePath(TLS_PROXY_TRUST_CERT_FILE_PATH); conf.setTlsCertificateFilePath(TLS_BROKER_CERT_FILE_PATH); conf.setTlsKeyFilePath(TLS_BROKER_KEY_FILE_PATH); - conf.setTlsAllowInsecureConnection(true); + conf.setTlsAllowInsecureConnection(false); Set superUserRoles = new HashSet<>(); superUserRoles.add("superUser"); @@ -168,22 +170,27 @@ protected void setup() throws Exception { conf.setBrokerClientAuthenticationParameters( "tlsCertFile:" + TLS_BROKER_CERT_FILE_PATH + "," + "tlsKeyFile:" + TLS_BROKER_KEY_FILE_PATH); conf.setBrokerClientTrustCertsFilePath(TLS_BROKER_TRUST_CERT_FILE_PATH); - Set providers = new HashSet<>(); - providers.add(AuthenticationProviderTls.class.getName()); - conf.setAuthenticationProviders(providers); + 
conf.setAuthenticationProviders(Collections.singleton(AuthenticationProviderTls.class.getName())); conf.setClusterName("proxy-authorization"); conf.setNumExecutorThreadPoolSize(5); + } + @BeforeMethod + @Override + protected void setup() throws Exception { super.init(); // start proxy service proxyConfig.setAuthenticationEnabled(true); proxyConfig.setAuthorizationEnabled(false); + proxyConfig.setForwardAuthorizationCredentials(true); proxyConfig.setBrokerServiceURL(pulsar.getBrokerServiceUrl()); proxyConfig.setBrokerServiceURLTLS(pulsar.getBrokerServiceUrlTls()); + proxyConfig.setAdvertisedAddress(null); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setServicePortTls(Optional.of(0)); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setWebServicePortTls(Optional.of(0)); @@ -197,7 +204,7 @@ protected void setup() throws Exception { proxyConfig.setBrokerClientAuthenticationPlugin(AuthenticationTls.class.getName()); proxyConfig.setBrokerClientAuthenticationParameters( "tlsCertFile:" + TLS_PROXY_CERT_FILE_PATH + "," + "tlsKeyFile:" + TLS_PROXY_KEY_FILE_PATH); - proxyConfig.setAuthenticationProviders(providers); + proxyConfig.setAuthenticationProviders(Collections.singleton(AuthenticationProviderTls.class.getName())); proxyService = Mockito.spy(new ProxyService(proxyConfig, new AuthenticationService( @@ -239,11 +246,11 @@ public void testProxyAuthorization() throws Exception { @Cleanup PulsarClient proxyClient = createPulsarClient(proxyService.getServiceUrlTls(), PulsarClient.builder()); - String namespaceName = "my-property/proxy-authorization/my-ns"; + String namespaceName = "my-tenant/my-ns"; - admin.clusters().createCluster("proxy-authorization", ClusterData.builder().serviceUrl(brokerUrl.toString()).build()); + admin.clusters().createCluster("proxy-authorization", ClusterData.builder().serviceUrlTls(brokerUrlTls.toString()).build()); - admin.tenants().createTenant("my-property", + 
admin.tenants().createTenant("my-tenant", new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("proxy-authorization"))); admin.namespaces().createNamespace(namespaceName); @@ -253,11 +260,11 @@ public void testProxyAuthorization() throws Exception { Sets.newHashSet(AuthAction.consume, AuthAction.produce)); Consumer consumer = proxyClient.newConsumer() - .topic("persistent://my-property/proxy-authorization/my-ns/my-topic1") + .topic("persistent://my-tenant/my-ns/my-topic1") .subscriptionName("my-subscriber-name").subscribe(); Producer producer = proxyClient.newProducer(Schema.BYTES) - .topic("persistent://my-property/proxy-authorization/my-ns/my-topic1").create(); + .topic("persistent://my-tenant/my-ns/my-topic1").create(); final int msgs = 10; for (int i = 0; i < msgs; i++) { String message = "my-message-" + i; @@ -293,11 +300,11 @@ public void testTlsHostVerificationProxyToClient(boolean hostnameVerificationEna PulsarClient proxyClient = createPulsarClient(proxyService.getServiceUrlTls(), PulsarClient.builder().enableTlsHostnameVerification(hostnameVerificationEnabled)); - String namespaceName = "my-property/proxy-authorization/my-ns"; + String namespaceName = "my-tenant/my-ns"; - admin.clusters().createCluster("proxy-authorization", ClusterData.builder().serviceUrl(brokerUrl.toString()).build()); + admin.clusters().createCluster("proxy-authorization", ClusterData.builder().serviceUrl(brokerUrlTls.toString()).build()); - admin.tenants().createTenant("my-property", + admin.tenants().createTenant("my-tenant", new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("proxy-authorization"))); admin.namespaces().createNamespace(namespaceName); @@ -307,7 +314,7 @@ public void testTlsHostVerificationProxyToClient(boolean hostnameVerificationEna Sets.newHashSet(AuthAction.consume, AuthAction.produce)); try { - proxyClient.newConsumer().topic("persistent://my-property/proxy-authorization/my-ns/my-topic1") + 
proxyClient.newConsumer().topic("persistent://my-tenant/my-ns/my-topic1") .subscriptionName("my-subscriber-name").subscribe(); if (hostnameVerificationEnabled) { Assert.fail("Connection should be failed due to hostnameVerification enabled"); @@ -343,13 +350,13 @@ public void testTlsHostVerificationProxyToBroker(boolean hostnameVerificationEna // create a client which connects to proxy over tls and pass authData @Cleanup PulsarClient proxyClient = createPulsarClient(proxyService.getServiceUrlTls(), - PulsarClient.builder().operationTimeout(1, TimeUnit.SECONDS)); + PulsarClient.builder().operationTimeout(15, TimeUnit.SECONDS)); - String namespaceName = "my-property/proxy-authorization/my-ns"; + String namespaceName = "my-tenant/my-ns"; - admin.clusters().createCluster("proxy-authorization", ClusterData.builder().serviceUrl(brokerUrl.toString()).build()); + admin.clusters().createCluster("proxy-authorization", ClusterData.builder().serviceUrlTls(brokerUrlTls.toString()).build()); - admin.tenants().createTenant("my-property", + admin.tenants().createTenant("my-tenant", new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("proxy-authorization"))); admin.namespaces().createNamespace(namespaceName); @@ -359,7 +366,7 @@ public void testTlsHostVerificationProxyToBroker(boolean hostnameVerificationEna Sets.newHashSet(AuthAction.consume, AuthAction.produce)); try { - proxyClient.newConsumer().topic("persistent://my-property/proxy-authorization/my-ns/my-topic1") + proxyClient.newConsumer().topic("persistent://my-tenant/my-ns/my-topic1") .subscriptionName("my-subscriber-name").subscribe(); if (hostnameVerificationEnabled) { Assert.fail("Connection should be failed due to hostnameVerification enabled"); @@ -381,12 +388,12 @@ public void testTlsHostVerificationProxyToBroker(boolean hostnameVerificationEna public void tlsCiphersAndProtocols(Set tlsCiphers, Set tlsProtocols, boolean expectFailure) throws Exception { log.info("-- Starting {} test --", methodName); 
- String namespaceName = "my-property/proxy-authorization/my-ns"; + String namespaceName = "my-tenant/my-ns"; createAdminClient(); - admin.clusters().createCluster("proxy-authorization", ClusterData.builder().serviceUrl(brokerUrl.toString()).build()); + admin.clusters().createCluster("proxy-authorization", ClusterData.builder().serviceUrl(brokerUrlTls.toString()).build()); - admin.tenants().createTenant("my-property", + admin.tenants().createTenant("my-tenant", new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("proxy-authorization"))); admin.namespaces().createNamespace(namespaceName); @@ -398,10 +405,13 @@ public void tlsCiphersAndProtocols(Set tlsCiphers, Set tlsProtoc ProxyConfiguration proxyConfig = new ProxyConfiguration(); proxyConfig.setAuthenticationEnabled(true); proxyConfig.setAuthorizationEnabled(false); + proxyConfig.setForwardAuthorizationCredentials(true); proxyConfig.setBrokerServiceURL(pulsar.getBrokerServiceUrl()); proxyConfig.setBrokerServiceURLTLS(pulsar.getBrokerServiceUrlTls()); + proxyConfig.setAdvertisedAddress(null); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setServicePortTls(Optional.of(0)); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setWebServicePortTls(Optional.of(0)); @@ -445,7 +455,7 @@ public void tlsCiphersAndProtocols(Set tlsCiphers, Set tlsProtoc @Cleanup PulsarClient proxyClient = createPulsarClient("pulsar://localhost:" + proxyService.getListenPortTls().get(), PulsarClient.builder()); Consumer consumer = proxyClient.newConsumer() - .topic("persistent://my-property/proxy-authorization/my-ns/my-topic1") + .topic("persistent://my-tenant/my-ns/my-topic1") .subscriptionName("my-subscriber-name").subscribe(); if (expectFailure) { @@ -467,7 +477,7 @@ private void createAdminClient() throws Exception { authParams.put("tlsKeyFile", TLS_SUPERUSER_CLIENT_KEY_FILE_PATH); admin = 
spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrlTls.toString()) - .tlsTrustCertsFilePath(TLS_PROXY_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(true) + .tlsTrustCertsFilePath(TLS_BROKER_TRUST_CERT_FILE_PATH) .authentication(AuthenticationTls.class.getName(), authParams).build()); } @@ -481,7 +491,7 @@ private PulsarClient createPulsarClient(String proxyServiceUrl, ClientBuilder cl authTls.configure(authParams); return clientBuilder.serviceUrl(proxyServiceUrl).statsInterval(0, TimeUnit.SECONDS) - .tlsTrustCertsFilePath(TLS_PROXY_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(true) + .tlsTrustCertsFilePath(TLS_PROXY_TRUST_CERT_FILE_PATH) .authentication(authTls).enableTls(true) .operationTimeout(1000, TimeUnit.MILLISECONDS).build(); } diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithJwtAuthorizationTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithJwtAuthorizationTest.java index 693e4ca5db9d6..6178454dd1900 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithJwtAuthorizationTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithJwtAuthorizationTest.java @@ -98,6 +98,7 @@ protected void setup() throws Exception { proxyConfig.setBrokerServiceURL(pulsar.getBrokerServiceUrl()); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setWebServicePort(Optional.of(0)); // enable auth&auth and use JWT at proxy diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithoutServiceDiscoveryTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithoutServiceDiscoveryTest.java index f20401c33aebf..59c50deafec1a 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithoutServiceDiscoveryTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/ProxyWithoutServiceDiscoveryTest.java @@ -104,6 +104,7 @@ protected void setup() 
throws Exception { proxyConfig.setBrokerServiceURLTLS(pulsar.getBrokerServiceUrlTls()); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setServicePortTls(Optional.of(0)); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setWebServicePortTls(Optional.of(0)); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/SuperUserAuthedAdminProxyHandlerTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/SuperUserAuthedAdminProxyHandlerTest.java index 7dc927a8a5cc2..b044469b4f2dc 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/SuperUserAuthedAdminProxyHandlerTest.java +++ b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/SuperUserAuthedAdminProxyHandlerTest.java @@ -80,6 +80,7 @@ protected void setup() throws Exception { proxyConfig.setAuthenticationEnabled(true); proxyConfig.setAuthorizationEnabled(true); proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setServicePortTls(Optional.of(0)); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setWebServicePortTls(Optional.of(0)); @@ -106,7 +107,6 @@ protected void setup() throws Exception { doReturn(report).when(discoveryProvider).nextBroker(); ServletHolder servletHolder = new ServletHolder(new AdminProxyHandler(proxyConfig, discoveryProvider)); - servletHolder.setInitParameter("preserveHost", "true"); webServer.addServlet("/admin", servletHolder); webServer.addServlet("/lookup", servletHolder); diff --git a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/UnauthedAdminProxyHandlerTest.java b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/UnauthedAdminProxyHandlerTest.java index 628f7cc05224a..b7535b756ac97 100644 --- a/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/UnauthedAdminProxyHandlerTest.java +++ 
b/pulsar-proxy/src/test/java/org/apache/pulsar/proxy/server/UnauthedAdminProxyHandlerTest.java @@ -68,6 +68,7 @@ protected void setup() throws Exception { // start proxy service proxyConfig.setServicePort(Optional.of(0)); + proxyConfig.setBrokerProxyAllowedTargetPorts("*"); proxyConfig.setWebServicePort(Optional.of(0)); proxyConfig.setBrokerWebServiceURL(brokerUrl.toString()); proxyConfig.setStatusFilePath(STATUS_FILE_PATH); @@ -82,7 +83,6 @@ protected void setup() throws Exception { discoveryProvider = spy(new BrokerDiscoveryProvider(proxyConfig, resource)); adminProxyHandler = new AdminProxyWrapper(proxyConfig, discoveryProvider); ServletHolder servletHolder = new ServletHolder(adminProxyHandler); - servletHolder.setInitParameter("preserveHost", "true"); webServer.addServlet("/admin", servletHolder); webServer.addServlet("/lookup", servletHolder); diff --git a/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-cacert.pem b/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-cacert.pem index df21a4968bfb2..7d2d58d8d7a06 100644 --- a/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-cacert.pem +++ b/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-cacert.pem @@ -2,76 +2,76 @@ Certificate: Data: Version: 3 (0x2) Serial Number: - 37:55:7a:ae:71:6b:5f:f0:0d:f7:11:df:b5:f9:ce:e1:65:a4:0c:a4 + 40:cd:a5:a5:35:76:ee:02:57:8b:30:8f:2a:12:34:03:45:c5:96:8c Signature Algorithm: sha256WithRSAEncryption Issuer: CN = CARoot Validity - Not Before: Apr 23 17:08:51 2021 GMT - Not After : Apr 21 17:08:51 2031 GMT + Not Before: May 30 13:38:24 2022 GMT + Not After : May 27 13:38:24 2032 GMT Subject: CN = CARoot Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (2048 bit) Modulus: - 00:ce:29:c8:45:af:07:8e:79:1e:55:66:7b:93:af: - 09:2c:72:fd:d5:33:38:30:a9:b5:50:92:90:33:b0: - 55:b0:c4:6b:37:4a:ba:5b:76:4d:52:0b:9f:58:b2: 
- c5:95:8c:47:6d:2b:07:0a:f5:74:43:ec:7d:36:bf: - 3e:8c:d6:13:31:ce:fc:d1:77:b0:ac:3c:ae:69:4b: - bd:5d:93:bd:84:57:51:a7:ef:03:2e:ae:3e:93:73: - 8b:1e:39:90:8b:32:e2:0a:dd:b8:20:83:98:76:91: - 75:d6:d5:db:43:7b:f4:c9:4e:23:52:e3:11:55:05: - 48:b8:82:47:ea:32:0b:56:1b:07:11:f3:06:c7:4a: - d5:6b:87:c2:2e:e2:9a:8c:9d:54:ca:5e:96:08:02: - 5d:17:42:4d:73:86:08:ab:6e:2e:f3:a8:c3:a3:c1: - bd:88:63:5e:69:7e:fa:af:31:8d:3a:49:ed:e8:cf: - 80:15:ca:d4:2b:fe:84:3d:aa:27:7e:98:36:48:4f: - 3b:27:90:1d:c1:fe:4e:13:b0:5e:a5:32:6e:16:38: - 2e:b7:d1:f3:6b:18:a5:3e:b6:d7:07:42:21:c7:d9: - 8e:d6:8c:a5:bf:25:9e:5c:fc:c7:12:18:59:23:b9: - 3d:39:45:3d:1c:81:e2:f2:29:91:05:20:46:b2:52: - 06:51 + 00:d8:d5:00:e0:6b:4f:4e:8a:67:08:e9:e3:3f:23: + ef:15:1d:82:10:85:f3:3b:77:9c:96:c1:aa:eb:90: + 41:0b:5b:ae:77:d9:a3:f1:cf:2a:32:40:78:33:6a: + 81:b9:c2:cd:91:36:98:df:41:84:c0:62:8a:a1:03: + 89:8d:2b:b8:91:49:a9:e8:a2:90:ad:b9:cd:23:84: + bc:60:1f:6f:b5:81:9f:9c:cf:d5:26:a8:a5:b6:4d: + 59:5f:5c:7f:da:e8:1d:3d:04:f3:b8:ef:f8:d5:73: + c6:fd:6a:b1:91:ae:16:b7:45:21:9a:1a:1a:76:74: + 01:40:ee:fc:3c:67:be:6a:7f:f4:a3:82:37:ee:43: + 41:f5:67:d5:d5:64:9c:d8:53:75:34:4d:23:80:b5: + 59:13:c2:27:47:8e:20:32:6f:f6:b3:70:bf:5e:15: + 08:7e:d1:bf:aa:4d:06:6b:0d:17:21:eb:95:47:52: + fa:d7:97:ef:1a:5d:63:26:17:36:01:20:ac:57:50: + 34:f0:57:49:38:3d:9c:68:6a:87:91:38:b6:76:9d: + bc:e9:4e:c2:58:54:8d:8a:32:05:9e:ba:cb:f0:d0: + ec:91:67:1d:77:bf:d5:02:77:d4:22:78:94:f4:9a: + 49:fa:ef:b2:9b:30:1a:8a:f0:a7:9a:2b:e5:e9:c7: + 36:c5 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Subject Key Identifier: - EF:DA:58:74:AA:21:F9:9E:19:7E:44:2B:84:32:93:F4:0F:79:18:3B + DD:AC:A0:40:6E:E9:2B:49:F2:35:DB:B4:E9:98:AD:58:7B:37:6B:55 X509v3 Authority Key Identifier: - keyid:EF:DA:58:74:AA:21:F9:9E:19:7E:44:2B:84:32:93:F4:0F:79:18:3B + keyid:DD:AC:A0:40:6E:E9:2B:49:F2:35:DB:B4:E9:98:AD:58:7B:37:6B:55 X509v3 Basic Constraints: critical CA:TRUE Signature Algorithm: sha256WithRSAEncryption - 
2e:f5:b6:f7:fc:50:89:16:1e:ea:8c:ec:57:54:f6:ca:d3:19: - 65:fe:da:c5:73:53:f6:d0:1e:26:96:f2:d3:03:55:8d:6e:c4: - cd:8c:2d:7a:ea:fa:38:6c:ed:fa:d5:23:b8:52:c1:e3:52:04: - 3d:46:8c:2d:b6:b2:47:68:41:92:f6:47:24:50:78:47:5e:2a: - 9b:df:85:a8:92:0d:49:17:eb:51:e8:b2:69:3c:4a:f3:9f:5f: - ea:fd:b2:08:3c:30:1a:93:be:d3:c3:b3:c7:60:7c:ea:f4:15: - 43:bd:3f:b1:d0:69:3c:84:5b:05:01:55:d7:d5:87:fb:58:53: - 03:d8:91:5f:e8:e0:37:88:82:ea:dc:1c:2d:a0:8d:82:68:65: - 6e:ea:0d:2a:e1:aa:cc:b3:d1:ce:a8:2b:2d:ed:e4:ba:0f:7f: - 51:48:d2:4b:2f:7c:eb:02:01:4f:2c:b6:06:c1:9a:97:2c:b7: - 6c:b7:06:86:d1:8b:cc:d6:d4:c3:ff:b5:65:c5:92:eb:9c:68: - 6d:99:d8:4a:6d:7a:ac:fe:dc:f3:12:f8:bb:2b:0a:b9:d8:1e: - 87:b6:e9:8b:51:32:f3:7b:0b:1a:29:57:4c:7d:5a:b6:9c:83: - 23:e5:35:2b:98:83:aa:7c:ef:24:3a:74:a8:86:22:32:06:fb: - 03:b7:01:9d + 07:0c:90:05:fa:2c:c9:4e:05:ec:6b:7d:99:9c:52:2a:20:34: + 46:ac:8d:24:81:f9:a7:f3:1d:03:32:45:82:9a:61:af:1f:63: + 25:6b:97:ca:93:78:e5:d7:87:81:b6:29:22:d4:0d:8d:ed:0e: + bd:85:80:6c:38:e9:86:3c:bd:ee:ff:26:78:0a:f0:a7:54:0b: + af:27:9e:8b:83:b7:10:e9:44:0d:4a:7e:a8:e2:aa:1c:06:f8: + 18:f1:c4:c9:e4:bb:17:41:59:94:b4:dc:78:53:fb:1b:43:57: + 82:59:de:6c:03:52:9a:28:cb:e4:9e:ea:c5:00:93:e0:27:b4: + 4b:e6:b3:c5:88:2d:14:33:10:ff:b0:23:4e:5d:ea:17:97:7d: + f4:e2:c8:fe:c3:4a:77:83:64:ef:c9:b6:3e:77:64:32:07:91: + bd:e1:58:9a:e1:38:ab:eb:d2:e3:cb:05:7c:c7:f3:2b:47:bf: + 36:64:7e:32:5a:62:44:07:c8:8e:9d:55:1a:99:c4:14:5a:66: + ed:5f:8b:ab:dd:eb:36:28:cd:77:47:84:00:ae:a7:34:0e:0d: + 77:df:67:72:08:94:75:52:1b:4a:71:4d:31:5d:aa:1b:aa:b6: + e0:d6:86:52:7c:26:ae:1f:96:ab:06:32:cb:7a:f3:bb:76:3e: + 08:53:9f:64 -----BEGIN CERTIFICATE----- -MIIDAzCCAeugAwIBAgIUN1V6rnFrX/AN9xHftfnO4WWkDKQwDQYJKoZIhvcNAQEL -BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIxMDQyMzE3MDg1MVoXDTMxMDQyMTE3 -MDg1MVowETEPMA0GA1UEAwwGQ0FSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAzinIRa8HjnkeVWZ7k68JLHL91TM4MKm1UJKQM7BVsMRrN0q6W3ZN -UgufWLLFlYxHbSsHCvV0Q+x9Nr8+jNYTMc780XewrDyuaUu9XZO9hFdRp+8DLq4+ 
-k3OLHjmQizLiCt24IIOYdpF11tXbQ3v0yU4jUuMRVQVIuIJH6jILVhsHEfMGx0rV -a4fCLuKajJ1Uyl6WCAJdF0JNc4YIq24u86jDo8G9iGNeaX76rzGNOknt6M+AFcrU -K/6EPaonfpg2SE87J5Adwf5OE7BepTJuFjgut9HzaxilPrbXB0Ihx9mO1oylvyWe -XPzHEhhZI7k9OUU9HIHi8imRBSBGslIGUQIDAQABo1MwUTAdBgNVHQ4EFgQU79pY -dKoh+Z4ZfkQrhDKT9A95GDswHwYDVR0jBBgwFoAU79pYdKoh+Z4ZfkQrhDKT9A95 -GDswDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEALvW29/xQiRYe -6ozsV1T2ytMZZf7axXNT9tAeJpby0wNVjW7EzYwteur6OGzt+tUjuFLB41IEPUaM -LbayR2hBkvZHJFB4R14qm9+FqJINSRfrUeiyaTxK859f6v2yCDwwGpO+08Ozx2B8 -6vQVQ70/sdBpPIRbBQFV19WH+1hTA9iRX+jgN4iC6twcLaCNgmhlbuoNKuGqzLPR -zqgrLe3kug9/UUjSSy986wIBTyy2BsGalyy3bLcGhtGLzNbUw/+1ZcWS65xobZnY -Sm16rP7c8xL4uysKudgeh7bpi1Ey83sLGilXTH1atpyDI+U1K5iDqnzvJDp0qIYi -Mgb7A7cBnQ== +MIIDAzCCAeugAwIBAgIUQM2lpTV27gJXizCPKhI0A0XFlowwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz +MzgyNFowETEPMA0GA1UEAwwGQ0FSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEA2NUA4GtPTopnCOnjPyPvFR2CEIXzO3eclsGq65BBC1uud9mj8c8q +MkB4M2qBucLNkTaY30GEwGKKoQOJjSu4kUmp6KKQrbnNI4S8YB9vtYGfnM/VJqil +tk1ZX1x/2ugdPQTzuO/41XPG/Wqxka4Wt0UhmhoadnQBQO78PGe+an/0o4I37kNB +9WfV1WSc2FN1NE0jgLVZE8InR44gMm/2s3C/XhUIftG/qk0Gaw0XIeuVR1L615fv +Gl1jJhc2ASCsV1A08FdJOD2caGqHkTi2dp286U7CWFSNijIFnrrL8NDskWcdd7/V +AnfUIniU9JpJ+u+ymzAaivCnmivl6cc2xQIDAQABo1MwUTAdBgNVHQ4EFgQU3ayg +QG7pK0nyNdu06ZitWHs3a1UwHwYDVR0jBBgwFoAU3aygQG7pK0nyNdu06ZitWHs3 +a1UwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEABwyQBfosyU4F +7Gt9mZxSKiA0RqyNJIH5p/MdAzJFgpphrx9jJWuXypN45deHgbYpItQNje0OvYWA +bDjphjy97v8meArwp1QLryeei4O3EOlEDUp+qOKqHAb4GPHEyeS7F0FZlLTceFP7 +G0NXglnebANSmijL5J7qxQCT4Ce0S+azxYgtFDMQ/7AjTl3qF5d99OLI/sNKd4Nk +78m2PndkMgeRveFYmuE4q+vS48sFfMfzK0e/NmR+MlpiRAfIjp1VGpnEFFpm7V+L +q93rNijNd0eEAK6nNA4Nd99ncgiUdVIbSnFNMV2qG6q24NaGUnwmrh+WqwYyy3rz +u3Y+CFOfZA== -----END CERTIFICATE----- diff --git a/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-cert.pem 
b/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-cert.pem index edd9a025176fe..31743d0684670 100644 --- a/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-cert.pem +++ b/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-cert.pem @@ -1,13 +1,13 @@ Certificate: Data: - Version: 1 (0x0) + Version: 3 (0x2) Serial Number: - 0c:26:15:df:8f:71:1d:6a:31:d0:da:af:64:ef:80:de:ac:9a:46:78 + 61:e6:1b:07:90:6a:4f:f7:cd:46:b9:59:1d:3e:1c:39:0d:f2:5e:07 Signature Algorithm: sha256WithRSAEncryption Issuer: CN = CARoot Validity - Not Before: Apr 23 17:08:51 2021 GMT - Not After : Apr 21 17:08:51 2031 GMT + Not Before: May 30 13:38:24 2022 GMT + Not After : May 27 13:38:24 2032 GMT Subject: C = US, ST = CA, O = Apache Pulsar, OU = Broker, CN = Broker Subject Public Key Info: Public Key Algorithm: rsaEncryption @@ -32,37 +32,41 @@ Certificate: 07:f0:b0:06:4f:2c:4c:75:c2:37:ff:35:0d:b1:42: 06:0b Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 Signature Algorithm: sha256WithRSAEncryption - 46:84:81:7e:4a:91:2a:c0:d7:0c:5a:a2:fb:6e:a2:e1:66:15: - b9:b3:50:1c:93:8c:68:ba:90:42:07:2c:d1:d9:22:53:c4:e7: - 74:a9:ac:0c:25:cb:ae:c9:a1:c9:35:49:5d:10:c6:ee:08:2a: - 23:f3:a4:87:24:92:c4:4e:35:b8:23:8e:be:ad:8c:5b:25:df: - 25:d4:49:8c:d6:11:bf:79:43:a2:88:7f:70:87:8c:fb:51:9a: - 4c:73:8d:10:e7:5b:fa:fb:76:f9:88:7a:6a:d0:bf:0f:65:1e: - 26:22:87:57:31:9a:c9:4c:62:cf:ef:00:2b:4e:2f:ee:d4:d8: - 0d:2f:7f:2e:14:21:d5:c3:25:ce:29:a3:f0:ee:c6:3d:d2:dc: - 7b:80:34:57:50:97:e7:79:d9:ca:39:10:73:2d:46:f4:98:de: - ec:be:98:1a:17:12:c3:9e:1f:0d:25:c8:4e:17:a1:4a:8d:6a: - 21:11:42:56:1a:16:79:12:e2:db:39:e1:5d:c4:2e:03:31:54: - d9:97:53:21:bc:f0:60:e1:ba:ff:f6:a5:4b:c1:39:4f:e1:87: - b7:63:9a:63:fa:a2:83:1c:b5:8e:fd:48:be:d5:50:40:0b:69: - 34:81:1e:d1:ca:c5:34:ff:bc:c3:ec:22:a5:3e:ca:31:fe:43: - 39:00:79:72 + 
8d:1d:69:d2:44:1f:af:68:30:80:c1:91:b2:2f:9a:7e:ca:ff: + 38:46:8e:28:59:02:2d:e7:74:c4:3c:b3:ac:b3:22:53:e9:54: + 3a:e2:4d:4d:65:63:47:dd:38:86:ec:d1:7d:4f:fe:5d:c6:c8: + c8:10:b8:33:5a:4d:9e:83:e3:92:97:c5:f1:d8:e3:97:6d:01: + 50:03:de:25:d8:e4:de:62:70:b8:c4:55:5b:9f:8c:61:b8:d7: + f0:8f:6c:2d:80:cc:b8:7b:8b:b4:54:9a:d6:e1:f9:7f:52:99: + 7b:ef:23:88:61:e5:7c:85:5c:57:98:cc:a6:98:4b:71:84:5c: + ab:5e:82:48:5a:da:5f:d6:84:b5:52:43:df:3c:0f:95:06:29: + 00:94:f8:98:94:6d:1c:c8:76:21:7a:2f:61:34:ab:bd:27:59: + d1:41:99:91:69:68:f7:b6:65:21:e8:9a:b1:9b:ac:72:12:17: + 54:0b:56:08:bd:9d:6b:0e:35:4a:f8:97:b6:83:00:55:96:0c: + 66:13:06:c9:27:5f:cc:d0:81:4b:3e:6e:d2:85:cd:79:7a:8c: + a0:1e:d8:9b:e4:da:e9:ba:51:f1:29:0f:69:00:df:24:a0:55: + 5e:cd:d0:84:c9:4a:a8:b4:12:33:29:6f:8a:8c:d7:a1:b4:8b: + 4a:7d:a2:30 -----BEGIN CERTIFICATE----- -MIIC7DCCAdQCFAwmFd+PcR1qMdDar2TvgN6smkZ4MA0GCSqGSIb3DQEBCwUAMBEx -DzANBgNVBAMMBkNBUm9vdDAeFw0yMTA0MjMxNzA4NTFaFw0zMTA0MjExNzA4NTFa -MFQxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEChMNQXBhY2hlIFB1 -bHNhcjEPMA0GA1UECxMGQnJva2VyMQ8wDQYDVQQDEwZCcm9rZXIwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKd9wqEyUkyyliBhJfqJLJU9Y/B8qqCl9y -ks236kVHcfBjT1gaPfrOpnOQwKn3JfB2de2yAxe+2IpW809qTH4DZZXlReuNR+hg -Xp44dFBUZaDs2FxlYDQbloN9cdRdf+NiWWfo8NYkfcBuNwNUTD0MMzmbM+FSRMVD -2uruLPMcFi5GTHyfXU1u/owjnvd+nznBcQZS9CaaItTPxSU5qdLkJMbYSkii7nYl -yzzwv80Qd/+BEUMhzDvMEHoHhPzMAqJF3pEta9HtFxrQRvSufbOJ+DF3leVGsakx -1tjjRwCygYHbihzZ8c3jTTX2OJEN6gfwsAZPLEx1wjf/NQ2xQgYLAgMBAAEwDQYJ -KoZIhvcNAQELBQADggEBAEaEgX5KkSrA1wxaovtuouFmFbmzUByTjGi6kEIHLNHZ -IlPE53SprAwly67Jock1SV0Qxu4IKiPzpIckksRONbgjjr6tjFsl3yXUSYzWEb95 -Q6KIf3CHjPtRmkxzjRDnW/r7dvmIemrQvw9lHiYih1cxmslMYs/vACtOL+7U2A0v -fy4UIdXDJc4po/Duxj3S3HuANFdQl+d52co5EHMtRvSY3uy+mBoXEsOeHw0lyE4X -oUqNaiERQlYaFnkS4ts54V3ELgMxVNmXUyG88GDhuv/2pUvBOU/hh7djmmP6ooMc -tY79SL7VUEALaTSBHtHKxTT/vMPsIqU+yjH+QzkAeXI= +MIIDETCCAfmgAwIBAgIUYeYbB5BqT/fNRrlZHT4cOQ3yXgcwDQYJKoZIhvcNAQEL 
+BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz +MzgyNFowVDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQKEw1BcGFj +aGUgUHVsc2FyMQ8wDQYDVQQLEwZCcm9rZXIxDzANBgNVBAMTBkJyb2tlcjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMp33CoTJSTLKWIGEl+okslT1j8H +yqoKX3KSzbfqRUdx8GNPWBo9+s6mc5DAqfcl8HZ17bIDF77YilbzT2pMfgNlleVF +641H6GBenjh0UFRloOzYXGVgNBuWg31x1F1/42JZZ+jw1iR9wG43A1RMPQwzOZsz +4VJExUPa6u4s8xwWLkZMfJ9dTW7+jCOe936fOcFxBlL0Jpoi1M/FJTmp0uQkxthK +SKLudiXLPPC/zRB3/4ERQyHMO8wQegeE/MwCokXekS1r0e0XGtBG9K59s4n4MXeV +5UaxqTHW2ONHALKBgduKHNnxzeNNNfY4kQ3qB/CwBk8sTHXCN/81DbFCBgsCAwEA +AaMeMBwwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUA +A4IBAQCNHWnSRB+vaDCAwZGyL5p+yv84Ro4oWQIt53TEPLOssyJT6VQ64k1NZWNH +3TiG7NF9T/5dxsjIELgzWk2eg+OSl8Xx2OOXbQFQA94l2OTeYnC4xFVbn4xhuNfw +j2wtgMy4e4u0VJrW4fl/Upl77yOIYeV8hVxXmMymmEtxhFyrXoJIWtpf1oS1UkPf +PA+VBikAlPiYlG0cyHYhei9hNKu9J1nRQZmRaWj3tmUh6Jqxm6xyEhdUC1YIvZ1r +DjVK+Je2gwBVlgxmEwbJJ1/M0IFLPm7Shc15eoygHtib5NrpulHxKQ9pAN8koFVe +zdCEyUqotBIzKW+KjNehtItKfaIw -----END CERTIFICATE----- diff --git a/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-cacert.pem b/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-cacert.pem index dc75fe9506eaf..127f56dd777a5 100644 --- a/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-cacert.pem +++ b/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-cacert.pem @@ -2,76 +2,76 @@ Certificate: Data: Version: 3 (0x2) Serial Number: - 33:a3:2e:28:58:0b:7a:7b:3c:71:4e:51:1d:1d:16:f5:72:3d:99:01 + 77:4f:f6:cf:99:ca:77:e8:a7:6e:1e:fd:e2:cf:ac:a9:da:68:d2:42 Signature Algorithm: sha256WithRSAEncryption Issuer: CN = CARoot Validity - Not Before: Apr 23 17:08:51 2021 GMT - Not After : Apr 21 17:08:51 2031 GMT + Not Before: May 30 13:38:24 2022 GMT + Not After : May 27 13:38:24 2032 GMT Subject: CN = CARoot Subject Public Key Info: Public Key Algorithm: 
rsaEncryption RSA Public-Key: (2048 bit) Modulus: - 00:d9:06:95:38:4a:ed:0d:ef:57:12:26:5e:2f:ea: - 3c:05:78:1e:36:90:6c:d6:8d:dc:18:e7:e0:24:d7: - 72:ae:d3:af:6a:ff:32:1f:ee:d8:93:9e:f4:53:88: - 0f:5d:d6:56:41:03:b9:1e:d7:d4:0d:d5:ae:27:20: - d8:8f:e3:7d:65:79:d3:00:c9:cc:f4:ef:f5:c9:f6: - 83:a4:45:b4:6d:11:ac:fc:55:f2:94:6b:75:74:d9: - f7:23:b2:5a:ba:a3:21:b4:6e:5a:2d:fc:84:32:ef: - 78:f5:d7:22:7c:e8:a8:15:aa:1d:9f:53:63:fd:77: - f4:d7:20:cc:21:34:1c:7a:22:a9:6a:de:90:06:ae: - 10:ff:96:21:61:9e:6d:21:f5:66:37:ef:a0:5a:a8: - 51:5f:22:24:9f:a9:a9:b3:21:10:f4:7a:d9:ee:c3: - 20:73:c3:48:0a:c7:98:7c:5f:04:7a:e1:eb:8c:d6: - f0:18:d7:e9:0c:11:cd:a1:81:f4:d4:67:c0:72:0f: - e3:90:86:92:97:bd:bc:44:df:b1:b3:6d:85:4f:6b: - fa:bf:9e:6a:1d:9c:77:23:3b:6f:89:38:fb:45:ff: - f5:76:b3:19:f7:7c:59:2b:07:ff:6a:4a:f5:93:4a: - 62:ef:18:3b:ea:54:8f:2d:c2:34:c8:a3:6f:ee:f8: - f2:a3 + 00:b8:5e:c2:60:ed:c4:ee:3c:5b:ab:fc:64:52:f3: + 30:41:fc:10:5a:ac:a6:9b:0a:93:d0:d0:c9:bf:96: + 14:a7:cf:5c:3e:23:91:7e:54:ec:fe:2d:9f:c9:34: + d1:4e:95:2f:85:9c:cc:be:90:a3:a4:cb:4d:a4:72: + d2:84:e0:c7:42:c4:bf:70:b6:fa:d2:45:8b:83:66: + 1e:a4:e9:0e:06:a3:46:ea:a7:18:cd:33:b9:f1:ff: + 76:91:72:8f:cd:f9:93:43:c3:6e:17:1f:2d:86:df: + b6:fb:2d:d6:be:2d:98:ad:de:00:c7:de:f9:68:b5: + 40:40:56:49:ae:23:e5:a1:3b:5f:15:5a:44:50:da: + fb:02:d3:42:c6:87:0d:c0:8d:3a:e6:e2:aa:73:31: + ab:79:58:51:cd:03:80:f3:12:ce:2f:35:04:8b:39: + 5f:b0:cc:b8:41:99:47:c1:17:96:8b:c2:44:84:b5: + 21:8a:15:52:fe:1a:5a:f9:88:cc:11:17:ee:48:dd: + ba:bf:ed:67:6e:27:35:42:cf:07:5e:b1:8b:81:55: + 92:01:8e:61:fd:8e:82:74:b1:70:7a:3d:52:1f:16: + 78:12:bb:b5:09:62:ce:6d:18:4a:e9:f5:27:19:bc: + 93:4e:ed:dd:53:a8:c1:bb:48:b7:18:20:7b:79:48: + 48:9d Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Subject Key Identifier: - 86:1F:20:03:1D:EA:65:52:AA:D7:38:B7:A7:B1:DC:0A:02:F9:F2:02 + 0F:46:61:3E:6F:71:22:E6:1F:32:37:7C:B2:81:A6:CC:DB:9D:F5:7C X509v3 Authority Key Identifier: - keyid:86:1F:20:03:1D:EA:65:52:AA:D7:38:B7:A7:B1:DC:0A:02:F9:F2:02 
+ keyid:0F:46:61:3E:6F:71:22:E6:1F:32:37:7C:B2:81:A6:CC:DB:9D:F5:7C X509v3 Basic Constraints: critical CA:TRUE Signature Algorithm: sha256WithRSAEncryption - c3:8a:4d:5b:3a:01:28:08:cc:cd:8b:cc:37:0d:0b:0c:45:dd: - c0:44:ee:36:9c:1d:7d:1f:b9:5a:a7:fd:9a:19:34:0f:8c:09: - 9d:24:f1:7b:a2:22:ef:7f:f3:4f:31:e2:b8:a5:f2:ec:d5:32: - 02:f3:10:c4:82:c4:a0:33:b0:50:53:b7:2e:3d:78:30:8e:b3: - c1:f8:51:4d:30:5b:40:65:6f:ad:b8:99:be:d8:cc:3b:43:00: - 2b:16:5c:9c:bd:83:24:a0:48:0d:cd:2e:29:74:a8:e6:bc:df: - f0:7c:2c:1f:03:72:f4:47:4d:88:e6:8f:53:77:25:23:57:0a: - 84:fb:38:e7:b0:84:57:2b:4d:5a:f0:94:34:8a:48:ca:dc:f7: - 08:b5:d5:1e:64:b4:03:c9:f3:3d:dd:f5:27:ac:f8:2b:d5:80: - ab:b5:b1:37:8e:ae:2f:03:c2:19:4d:37:d6:e2:76:24:a2:98: - ed:c8:c5:d0:65:29:4d:ce:0a:bf:d0:a3:3f:f6:03:47:fa:75: - 8c:06:22:fe:8a:13:9a:9c:17:f5:35:71:7d:66:b9:cd:ca:ac: - 1e:c3:09:c6:76:b0:6c:2b:45:fd:5b:a9:02:7b:e8:fa:65:32: - e3:8e:7d:25:6e:06:db:bc:fd:5b:ad:78:d3:e0:09:df:3d:9c: - 3b:56:c5:69 + 91:e8:d8:c4:32:2e:80:5c:d4:cb:24:7a:81:43:a9:c7:95:90: + 1a:2e:7a:d3:0c:5d:b6:21:05:67:4d:98:5a:0d:71:ea:80:01: + 95:42:fe:fa:f1:7c:dc:bd:76:ff:05:26:3b:f0:94:b3:09:2c: + 34:dd:43:56:46:2b:15:35:99:d9:94:54:22:cf:a6:68:b0:d1: + 79:e2:f0:9f:0b:02:7c:cf:1f:bd:d0:f6:49:c6:82:28:a5:c6: + ae:94:65:cf:fd:ad:a8:6c:c2:17:da:db:f3:be:30:1a:1b:b4: + 2c:fa:08:71:9d:64:09:45:02:92:02:ad:eb:15:47:14:43:5b: + a8:2d:1a:ec:14:93:dc:ff:bb:51:33:a3:d5:4d:e2:77:ca:e1: + a5:98:5c:7a:b6:10:19:d3:d7:f5:14:a5:d5:08:f1:97:18:3d: + 5f:a6:4e:a2:4a:0d:4b:d4:bb:56:6b:a8:44:35:62:c5:d8:c6: + 67:11:93:1c:22:64:3e:aa:15:08:dc:87:39:dd:f6:e0:a0:d5: + 00:db:27:79:3d:f4:35:7c:46:a9:fa:0c:fa:fc:74:f5:bf:f4: + fe:71:40:45:33:22:35:83:f7:1a:96:2a:fc:b2:33:e0:1a:e8: + 24:48:91:5d:90:5c:4c:93:33:4c:40:de:26:bb:24:ac:48:9b: + ae:fe:19:34 -----BEGIN CERTIFICATE----- -MIIDAzCCAeugAwIBAgIUM6MuKFgLens8cU5RHR0W9XI9mQEwDQYJKoZIhvcNAQEL -BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIxMDQyMzE3MDg1MVoXDTMxMDQyMTE3 
-MDg1MVowETEPMA0GA1UEAwwGQ0FSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEA2QaVOErtDe9XEiZeL+o8BXgeNpBs1o3cGOfgJNdyrtOvav8yH+7Y -k570U4gPXdZWQQO5HtfUDdWuJyDYj+N9ZXnTAMnM9O/1yfaDpEW0bRGs/FXylGt1 -dNn3I7JauqMhtG5aLfyEMu949dcifOioFaodn1Nj/Xf01yDMITQceiKpat6QBq4Q -/5YhYZ5tIfVmN++gWqhRXyIkn6mpsyEQ9HrZ7sMgc8NICseYfF8EeuHrjNbwGNfp -DBHNoYH01GfAcg/jkIaSl728RN+xs22FT2v6v55qHZx3IztviTj7Rf/1drMZ93xZ -Kwf/akr1k0pi7xg76lSPLcI0yKNv7vjyowIDAQABo1MwUTAdBgNVHQ4EFgQUhh8g -Ax3qZVKq1zi3p7HcCgL58gIwHwYDVR0jBBgwFoAUhh8gAx3qZVKq1zi3p7HcCgL5 -8gIwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw4pNWzoBKAjM -zYvMNw0LDEXdwETuNpwdfR+5Wqf9mhk0D4wJnSTxe6Ii73/zTzHiuKXy7NUyAvMQ -xILEoDOwUFO3Lj14MI6zwfhRTTBbQGVvrbiZvtjMO0MAKxZcnL2DJKBIDc0uKXSo -5rzf8HwsHwNy9EdNiOaPU3clI1cKhPs457CEVytNWvCUNIpIytz3CLXVHmS0A8nz -Pd31J6z4K9WAq7WxN46uLwPCGU031uJ2JKKY7cjF0GUpTc4Kv9CjP/YDR/p1jAYi -/ooTmpwX9TVxfWa5zcqsHsMJxnawbCtF/VupAnvo+mUy4459JW4G27z9W6140+AJ -3z2cO1bFaQ== +MIIDAzCCAeugAwIBAgIUd0/2z5nKd+inbh794s+sqdpo0kIwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz +MzgyNFowETEPMA0GA1UEAwwGQ0FSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAuF7CYO3E7jxbq/xkUvMwQfwQWqymmwqT0NDJv5YUp89cPiORflTs +/i2fyTTRTpUvhZzMvpCjpMtNpHLShODHQsS/cLb60kWLg2YepOkOBqNG6qcYzTO5 +8f92kXKPzfmTQ8NuFx8tht+2+y3Wvi2Yrd4Ax975aLVAQFZJriPloTtfFVpEUNr7 +AtNCxocNwI065uKqczGreVhRzQOA8xLOLzUEizlfsMy4QZlHwReWi8JEhLUhihVS +/hpa+YjMERfuSN26v+1nbic1Qs8HXrGLgVWSAY5h/Y6CdLFwej1SHxZ4Eru1CWLO +bRhK6fUnGbyTTu3dU6jBu0i3GCB7eUhInQIDAQABo1MwUTAdBgNVHQ4EFgQUD0Zh +Pm9xIuYfMjd8soGmzNud9XwwHwYDVR0jBBgwFoAUD0ZhPm9xIuYfMjd8soGmzNud +9XwwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAkejYxDIugFzU +yyR6gUOpx5WQGi560wxdtiEFZ02YWg1x6oABlUL++vF83L12/wUmO/CUswksNN1D +VkYrFTWZ2ZRUIs+maLDReeLwnwsCfM8fvdD2ScaCKKXGrpRlz/2tqGzCF9rb874w +Ghu0LPoIcZ1kCUUCkgKt6xVHFENbqC0a7BST3P+7UTOj1U3id8rhpZhcerYQGdPX +9RSl1Qjxlxg9X6ZOokoNS9S7VmuoRDVixdjGZxGTHCJkPqoVCNyHOd324KDVANsn +eT30NXxGqfoM+vx09b/0/nFARTMiNYP3GpYq/LIz4BroJEiRXZBcTJMzTEDeJrsk 
+rEibrv4ZNA== -----END CERTIFICATE----- diff --git a/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-cert.pem b/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-cert.pem index 0ac579026ef26..1a21d9d41387f 100644 --- a/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-cert.pem +++ b/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/client-cert.pem @@ -1,13 +1,13 @@ Certificate: Data: - Version: 1 (0x0) + Version: 3 (0x2) Serial Number: - 0c:26:15:df:8f:71:1d:6a:31:d0:da:af:64:ef:80:de:ac:9a:46:79 + 61:e6:1b:07:90:6a:4f:f7:cd:46:b9:59:1d:3e:1c:39:0d:f2:5e:03 Signature Algorithm: sha256WithRSAEncryption Issuer: CN = CARoot Validity - Not Before: Apr 23 17:08:51 2021 GMT - Not After : Apr 21 17:08:51 2031 GMT + Not Before: May 30 13:38:24 2022 GMT + Not After : May 27 13:38:24 2032 GMT Subject: C = US, ST = CA, O = Apache Pulsar, OU = Client, CN = Client Subject Public Key Info: Public Key Algorithm: rsaEncryption @@ -32,37 +32,41 @@ Certificate: 8e:18:48:4c:5f:19:e9:b0:7b:22:d3:bc:42:32:45: 9a:d1 Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 Signature Algorithm: sha256WithRSAEncryption - a4:bb:d2:e4:ba:17:1f:07:13:26:ac:e1:71:df:1e:d4:d7:a7: - 31:dd:df:ce:e6:bb:11:fb:cf:a5:66:d2:fb:0e:26:90:fd:94: - 0d:d2:d6:91:f3:65:75:ae:16:b6:92:2e:0a:41:b5:fc:ba:33: - 57:85:92:e8:a3:30:97:d9:26:dc:e0:37:da:c5:bd:5f:e9:dd: - db:81:cb:38:96:99:6e:d2:a5:6d:92:a8:6d:be:03:6f:a9:48: - 4a:a1:4b:91:f9:c3:11:85:79:1e:4e:77:98:ff:43:dd:e0:f9: - 8e:95:fe:f3:e2:eb:48:72:cf:04:fe:3d:78:b3:a8:ee:56:c8: - 12:c8:0a:3d:70:f4:86:42:d2:b9:54:4d:07:8c:45:ad:af:b9: - 43:c8:f9:ee:fc:5d:96:a2:b6:d5:d9:48:57:4e:b5:7d:c7:8c: - 35:21:99:13:9a:60:42:1f:39:4a:3a:1b:3b:e5:ab:1d:91:59: - 8a:e1:82:9e:70:79:f9:9a:6e:bb:a9:99:30:4d:93:c8:bf:95: - 91:a1:03:a3:ac:d8:cd:80:db:89:82:a7:e6:74:8d:53:b3:a6: - 
7a:b9:ca:93:14:a2:01:08:bd:9f:4e:2d:0d:50:b3:aa:e8:a6: - a8:43:b5:d6:a4:1c:2f:62:7a:1f:1b:92:6b:2d:fa:12:c3:1a: - ed:8b:11:fe + 8b:88:90:00:1a:15:fa:11:f2:f0:35:6f:0f:f2:76:74:fc:8d: + bc:03:ee:a5:c5:21:17:c9:01:6b:58:93:fa:3e:7b:e0:0d:6d: + db:1f:2a:48:fa:15:34:66:b7:cb:be:82:c6:28:91:99:42:5a: + 36:b6:0b:2f:bb:85:14:88:a9:ea:dd:0a:7a:be:c4:e7:b2:2d: + 82:a9:37:bc:d9:5c:aa:03:2e:54:68:b1:b7:e8:d6:45:a5:8f: + 48:45:2c:9c:7a:55:0a:4a:07:1b:30:8a:49:6d:f4:62:b1:9e: + 92:0e:d9:34:44:6c:6d:e7:a3:18:bb:85:58:6d:da:20:83:d5: + ca:65:63:1e:3b:e6:df:7b:97:40:4f:b1:59:63:a9:b5:80:6f: + 97:51:53:a1:d3:29:1f:1a:26:05:17:59:3e:16:4f:5f:38:36: + 76:30:c6:bf:1e:3e:ed:39:83:91:31:58:01:13:59:5c:c5:e9: + d6:61:e0:f3:5f:c7:47:8a:5f:af:23:98:89:7b:b4:e6:f6:51: + 98:a0:26:31:c8:67:91:6d:d5:68:75:3d:4d:48:44:5f:3b:9c: + df:a7:87:a0:11:02:d2:13:5f:c1:4c:3f:3e:09:59:2e:fc:cb: + c2:c5:f0:f8:91:df:c3:dd:ad:c8:fc:44:23:9b:78:0d:3b:f2: + 82:f6:02:82 -----BEGIN CERTIFICATE----- -MIIC7DCCAdQCFAwmFd+PcR1qMdDar2TvgN6smkZ5MA0GCSqGSIb3DQEBCwUAMBEx -DzANBgNVBAMMBkNBUm9vdDAeFw0yMTA0MjMxNzA4NTFaFw0zMTA0MjExNzA4NTFa -MFQxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEChMNQXBhY2hlIFB1 -bHNhcjEPMA0GA1UECxMGQ2xpZW50MQ8wDQYDVQQDEwZDbGllbnQwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDeHhC9ZBPBbHpJhgE7q6sd7LKTQWxsIfLm -FRtRzq1n/Rg+f3pkomJfLgtZtO3ZFw63vFBmQbfjxHHJc3M92G00gPLjuZiPK1QU -lbNRG9aRhc23NKJQtvGGbgcw+q5VoF35fByRUGJ9uxSGkgqsKT4oG5nKMGPcqV8F -+Dg+MBACn8yU10fgGvQcaJY9El5YIUEs7JatnghWg3qSX0vmvQEWcCivqicdxP6y -Cb+ltEfZWEv+QYEOokZXwTl8jeSxpyXmtN3zniTJ58CMGrSr3bkzvxHLvrsi9/yt -xEBB1+83CBqVRR/bFF8L+Ej/QSTLXI4YSExfGemweyLTvEIyRZrRAgMBAAEwDQYJ -KoZIhvcNAQELBQADggEBAKS70uS6Fx8HEyas4XHfHtTXpzHd387muxH7z6Vm0vsO -JpD9lA3S1pHzZXWuFraSLgpBtfy6M1eFkuijMJfZJtzgN9rFvV/p3duByziWmW7S -pW2SqG2+A2+pSEqhS5H5wxGFeR5Od5j/Q93g+Y6V/vPi60hyzwT+PXizqO5WyBLI -Cj1w9IZC0rlUTQeMRa2vuUPI+e78XZaittXZSFdOtX3HjDUhmROaYEIfOUo6Gzvl -qx2RWYrhgp5wefmabrupmTBNk8i/lZGhA6Os2M2A24mCp+Z0jVOzpnq5ypMUogEI 
-vZ9OLQ1Qs6ropqhDtdakHC9ieh8bkmst+hLDGu2LEf4= +MIIDETCCAfmgAwIBAgIUYeYbB5BqT/fNRrlZHT4cOQ3yXgMwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz +MzgyNFowVDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQKEw1BcGFj +aGUgUHVsc2FyMQ8wDQYDVQQLEwZDbGllbnQxDzANBgNVBAMTBkNsaWVudDCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN4eEL1kE8FsekmGATurqx3sspNB +bGwh8uYVG1HOrWf9GD5/emSiYl8uC1m07dkXDre8UGZBt+PEcclzcz3YbTSA8uO5 +mI8rVBSVs1Eb1pGFzbc0olC28YZuBzD6rlWgXfl8HJFQYn27FIaSCqwpPigbmcow +Y9ypXwX4OD4wEAKfzJTXR+Aa9Bxolj0SXlghQSzslq2eCFaDepJfS+a9ARZwKK+q +Jx3E/rIJv6W0R9lYS/5BgQ6iRlfBOXyN5LGnJea03fOeJMnnwIwatKvduTO/Ecu+ +uyL3/K3EQEHX7zcIGpVFH9sUXwv4SP9BJMtcjhhITF8Z6bB7ItO8QjJFmtECAwEA +AaMeMBwwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUA +A4IBAQCLiJAAGhX6EfLwNW8P8nZ0/I28A+6lxSEXyQFrWJP6PnvgDW3bHypI+hU0 +ZrfLvoLGKJGZQlo2tgsvu4UUiKnq3Qp6vsTnsi2CqTe82VyqAy5UaLG36NZFpY9I +RSycelUKSgcbMIpJbfRisZ6SDtk0RGxt56MYu4VYbdogg9XKZWMeO+bfe5dAT7FZ +Y6m1gG+XUVOh0ykfGiYFF1k+Fk9fODZ2MMa/Hj7tOYORMVgBE1lcxenWYeDzX8dH +il+vI5iJe7Tm9lGYoCYxyGeRbdVodT1NSERfO5zfp4egEQLSE1/BTD8+CVku/MvC +xfD4kd/D3a3I/EQjm3gNO/KC9gKC -----END CERTIFICATE----- diff --git a/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cacert.pem b/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cacert.pem index cb22ab5057372..127f56dd777a5 100644 --- a/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cacert.pem +++ b/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cacert.pem @@ -2,76 +2,76 @@ Certificate: Data: Version: 3 (0x2) Serial Number: - 2d:fc:78:73:ca:55:1e:32:12:3e:ef:08:24:cf:63:95:1e:ad:ea:ae + 77:4f:f6:cf:99:ca:77:e8:a7:6e:1e:fd:e2:cf:ac:a9:da:68:d2:42 Signature Algorithm: sha256WithRSAEncryption Issuer: CN = CARoot Validity - Not Before: Apr 23 17:08:51 2021 GMT - Not After : Apr 21 17:08:51 2031 GMT + Not Before: May 30 13:38:24 2022 GMT + Not 
After : May 27 13:38:24 2032 GMT Subject: CN = CARoot Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (2048 bit) Modulus: - 00:c3:e0:f7:5d:bb:9a:76:ee:84:c6:2d:79:3f:a6: - 4b:3b:1f:32:31:d9:65:80:d3:02:13:23:2a:f1:2f: - e6:ac:bc:24:d1:cb:b9:5b:ed:cb:63:fe:31:e4:e6: - b8:f3:13:72:be:48:57:cb:d1:70:0f:67:16:6d:26: - bc:23:1c:64:30:ee:c8:0e:0e:68:d9:43:7e:42:74: - 7a:d4:59:a4:76:67:70:9f:85:aa:f3:9f:6c:e6:a1: - b5:06:3c:1d:46:38:45:05:df:88:cc:3a:ad:6c:72: - 96:69:55:d0:b2:a8:ed:fd:b8:07:6b:5c:6d:1c:0d: - 98:c2:88:3f:59:3c:d6:6c:ab:df:dd:3a:c0:5c:fe: - 86:74:38:bc:00:d4:f0:50:ea:f0:e6:74:23:48:6d: - 63:77:c7:f6:e2:94:f8:1b:0f:51:98:f6:fb:e0:20: - 58:c1:b6:a0:58:08:6f:ad:05:f7:71:90:b3:1a:5b: - 24:88:0b:ed:71:26:aa:84:c2:21:97:76:e7:d5:77: - 30:62:15:d4:30:5e:f9:aa:bc:7f:1f:50:5e:92:47: - f2:92:c0:85:cf:ce:33:07:24:e9:ee:b7:04:0d:b7: - 9f:82:ae:a0:b6:73:51:8f:fe:bd:2c:f3:b5:76:61: - 3c:da:c6:c0:bd:44:46:6f:43:9d:47:b6:0a:80:a5: - fe:3b + 00:b8:5e:c2:60:ed:c4:ee:3c:5b:ab:fc:64:52:f3: + 30:41:fc:10:5a:ac:a6:9b:0a:93:d0:d0:c9:bf:96: + 14:a7:cf:5c:3e:23:91:7e:54:ec:fe:2d:9f:c9:34: + d1:4e:95:2f:85:9c:cc:be:90:a3:a4:cb:4d:a4:72: + d2:84:e0:c7:42:c4:bf:70:b6:fa:d2:45:8b:83:66: + 1e:a4:e9:0e:06:a3:46:ea:a7:18:cd:33:b9:f1:ff: + 76:91:72:8f:cd:f9:93:43:c3:6e:17:1f:2d:86:df: + b6:fb:2d:d6:be:2d:98:ad:de:00:c7:de:f9:68:b5: + 40:40:56:49:ae:23:e5:a1:3b:5f:15:5a:44:50:da: + fb:02:d3:42:c6:87:0d:c0:8d:3a:e6:e2:aa:73:31: + ab:79:58:51:cd:03:80:f3:12:ce:2f:35:04:8b:39: + 5f:b0:cc:b8:41:99:47:c1:17:96:8b:c2:44:84:b5: + 21:8a:15:52:fe:1a:5a:f9:88:cc:11:17:ee:48:dd: + ba:bf:ed:67:6e:27:35:42:cf:07:5e:b1:8b:81:55: + 92:01:8e:61:fd:8e:82:74:b1:70:7a:3d:52:1f:16: + 78:12:bb:b5:09:62:ce:6d:18:4a:e9:f5:27:19:bc: + 93:4e:ed:dd:53:a8:c1:bb:48:b7:18:20:7b:79:48: + 48:9d Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Subject Key Identifier: - 4E:9B:EB:E2:41:17:D1:24:AF:39:02:BC:42:D6:81:B7:62:6D:E3:57 + 0F:46:61:3E:6F:71:22:E6:1F:32:37:7C:B2:81:A6:CC:DB:9D:F5:7C 
X509v3 Authority Key Identifier: - keyid:4E:9B:EB:E2:41:17:D1:24:AF:39:02:BC:42:D6:81:B7:62:6D:E3:57 + keyid:0F:46:61:3E:6F:71:22:E6:1F:32:37:7C:B2:81:A6:CC:DB:9D:F5:7C X509v3 Basic Constraints: critical CA:TRUE Signature Algorithm: sha256WithRSAEncryption - 16:01:53:ab:85:57:5f:92:b9:24:85:c5:70:02:fa:fe:ae:ff: - e9:3e:36:24:6e:9e:34:dd:7c:56:f9:31:a1:d1:ae:63:af:3c: - 2c:e5:8e:47:34:df:b0:1c:33:48:3f:e7:32:fd:a8:38:99:a6: - ef:e1:7b:65:92:80:1e:68:e5:98:db:c5:50:4a:35:53:e5:86: - 89:56:85:0c:6e:da:64:28:68:33:dc:29:3f:41:8b:cf:9c:ec: - fc:74:15:19:ff:da:0a:ef:d0:51:67:97:ad:2f:e4:8a:94:52: - 96:18:bd:77:b3:2b:79:9a:f8:de:af:0f:a2:65:c4:f2:88:3a: - 57:79:18:e1:d8:7c:e0:52:da:35:8c:dd:d9:75:0d:72:e9:e8: - d0:a7:a6:0b:49:88:6d:ed:86:45:25:72:15:4e:2a:0b:6f:9c: - 2f:48:75:28:b0:aa:cd:15:7f:ae:b3:b7:ec:75:d9:63:c8:46: - 8f:84:49:1c:e2:db:95:7b:3d:bb:fd:98:45:53:56:3c:3c:de: - 60:16:f9:14:b8:7e:27:37:be:f0:69:b5:a0:18:bc:83:1e:c1: - 3a:11:9b:a3:1d:1f:a6:9c:7e:c9:aa:7c:53:44:9e:1d:cb:ca: - c8:22:7f:cc:ad:e6:fa:51:54:4d:b5:a1:e6:e3:04:4e:49:1e: - 67:9c:93:30 + 91:e8:d8:c4:32:2e:80:5c:d4:cb:24:7a:81:43:a9:c7:95:90: + 1a:2e:7a:d3:0c:5d:b6:21:05:67:4d:98:5a:0d:71:ea:80:01: + 95:42:fe:fa:f1:7c:dc:bd:76:ff:05:26:3b:f0:94:b3:09:2c: + 34:dd:43:56:46:2b:15:35:99:d9:94:54:22:cf:a6:68:b0:d1: + 79:e2:f0:9f:0b:02:7c:cf:1f:bd:d0:f6:49:c6:82:28:a5:c6: + ae:94:65:cf:fd:ad:a8:6c:c2:17:da:db:f3:be:30:1a:1b:b4: + 2c:fa:08:71:9d:64:09:45:02:92:02:ad:eb:15:47:14:43:5b: + a8:2d:1a:ec:14:93:dc:ff:bb:51:33:a3:d5:4d:e2:77:ca:e1: + a5:98:5c:7a:b6:10:19:d3:d7:f5:14:a5:d5:08:f1:97:18:3d: + 5f:a6:4e:a2:4a:0d:4b:d4:bb:56:6b:a8:44:35:62:c5:d8:c6: + 67:11:93:1c:22:64:3e:aa:15:08:dc:87:39:dd:f6:e0:a0:d5: + 00:db:27:79:3d:f4:35:7c:46:a9:fa:0c:fa:fc:74:f5:bf:f4: + fe:71:40:45:33:22:35:83:f7:1a:96:2a:fc:b2:33:e0:1a:e8: + 24:48:91:5d:90:5c:4c:93:33:4c:40:de:26:bb:24:ac:48:9b: + ae:fe:19:34 -----BEGIN CERTIFICATE----- -MIIDAzCCAeugAwIBAgIULfx4c8pVHjISPu8IJM9jlR6t6q4wDQYJKoZIhvcNAQEL 
-BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIxMDQyMzE3MDg1MVoXDTMxMDQyMTE3 -MDg1MVowETEPMA0GA1UEAwwGQ0FSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAw+D3Xbuadu6Exi15P6ZLOx8yMdllgNMCEyMq8S/mrLwk0cu5W+3L -Y/4x5Oa48xNyvkhXy9FwD2cWbSa8IxxkMO7IDg5o2UN+QnR61Fmkdmdwn4Wq859s -5qG1BjwdRjhFBd+IzDqtbHKWaVXQsqjt/bgHa1xtHA2Ywog/WTzWbKvf3TrAXP6G -dDi8ANTwUOrw5nQjSG1jd8f24pT4Gw9RmPb74CBYwbagWAhvrQX3cZCzGlskiAvt -cSaqhMIhl3bn1XcwYhXUMF75qrx/H1BekkfyksCFz84zByTp7rcEDbefgq6gtnNR -j/69LPO1dmE82sbAvURGb0OdR7YKgKX+OwIDAQABo1MwUTAdBgNVHQ4EFgQUTpvr -4kEX0SSvOQK8QtaBt2Jt41cwHwYDVR0jBBgwFoAUTpvr4kEX0SSvOQK8QtaBt2Jt -41cwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAFgFTq4VXX5K5 -JIXFcAL6/q7/6T42JG6eNN18VvkxodGuY688LOWORzTfsBwzSD/nMv2oOJmm7+F7 -ZZKAHmjlmNvFUEo1U+WGiVaFDG7aZChoM9wpP0GLz5zs/HQVGf/aCu/QUWeXrS/k -ipRSlhi9d7MreZr43q8PomXE8og6V3kY4dh84FLaNYzd2XUNcuno0KemC0mIbe2G -RSVyFU4qC2+cL0h1KLCqzRV/rrO37HXZY8hGj4RJHOLblXs9u/2YRVNWPDzeYBb5 -FLh+Jze+8Gm1oBi8gx7BOhGbox0fppx+yap8U0SeHcvKyCJ/zK3m+lFUTbWh5uME -TkkeZ5yTMA== +MIIDAzCCAeugAwIBAgIUd0/2z5nKd+inbh794s+sqdpo0kIwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz +MzgyNFowETEPMA0GA1UEAwwGQ0FSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAuF7CYO3E7jxbq/xkUvMwQfwQWqymmwqT0NDJv5YUp89cPiORflTs +/i2fyTTRTpUvhZzMvpCjpMtNpHLShODHQsS/cLb60kWLg2YepOkOBqNG6qcYzTO5 +8f92kXKPzfmTQ8NuFx8tht+2+y3Wvi2Yrd4Ax975aLVAQFZJriPloTtfFVpEUNr7 +AtNCxocNwI065uKqczGreVhRzQOA8xLOLzUEizlfsMy4QZlHwReWi8JEhLUhihVS +/hpa+YjMERfuSN26v+1nbic1Qs8HXrGLgVWSAY5h/Y6CdLFwej1SHxZ4Eru1CWLO +bRhK6fUnGbyTTu3dU6jBu0i3GCB7eUhInQIDAQABo1MwUTAdBgNVHQ4EFgQUD0Zh +Pm9xIuYfMjd8soGmzNud9XwwHwYDVR0jBBgwFoAUD0ZhPm9xIuYfMjd8soGmzNud +9XwwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAkejYxDIugFzU +yyR6gUOpx5WQGi560wxdtiEFZ02YWg1x6oABlUL++vF83L12/wUmO/CUswksNN1D +VkYrFTWZ2ZRUIs+maLDReeLwnwsCfM8fvdD2ScaCKKXGrpRlz/2tqGzCF9rb874w +Ghu0LPoIcZ1kCUUCkgKt6xVHFENbqC0a7BST3P+7UTOj1U3id8rhpZhcerYQGdPX +9RSl1Qjxlxg9X6ZOokoNS9S7VmuoRDVixdjGZxGTHCJkPqoVCNyHOd324KDVANsn 
+eT30NXxGqfoM+vx09b/0/nFARTMiNYP3GpYq/LIz4BroJEiRXZBcTJMzTEDeJrsk +rEibrv4ZNA== -----END CERTIFICATE----- diff --git a/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cert.pem b/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cert.pem index a4c03e3c2eaa4..e2c1e5a230c26 100644 --- a/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cert.pem +++ b/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/proxy-cert.pem @@ -1,13 +1,13 @@ Certificate: Data: - Version: 1 (0x0) + Version: 3 (0x2) Serial Number: - 0c:26:15:df:8f:71:1d:6a:31:d0:da:af:64:ef:80:de:ac:9a:46:7a + 61:e6:1b:07:90:6a:4f:f7:cd:46:b9:59:1d:3e:1c:39:0d:f2:5e:04 Signature Algorithm: sha256WithRSAEncryption Issuer: CN = CARoot Validity - Not Before: Apr 23 17:08:51 2021 GMT - Not After : Apr 21 17:08:51 2031 GMT + Not Before: May 30 13:38:24 2022 GMT + Not After : May 27 13:38:24 2032 GMT Subject: C = US, ST = CA, O = Apache Pulsar, OU = Proxy, CN = Proxy Subject Public Key Info: Public Key Algorithm: rsaEncryption @@ -32,37 +32,41 @@ Certificate: 29:e1:23:c4:ed:a0:1c:f6:2a:ed:dc:c0:df:97:a9: f3:8d Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 Signature Algorithm: sha256WithRSAEncryption - 7b:27:a8:2a:54:35:76:e5:f8:a7:60:8d:e7:35:12:69:38:f3: - 32:af:25:0f:69:1a:b1:af:79:e5:7c:94:5c:8f:aa:76:95:54: - 35:b4:bb:64:20:1a:91:1e:b3:e4:d1:06:72:24:c3:35:bd:9c: - f6:54:61:d9:39:22:99:42:08:d4:97:aa:7d:82:46:fc:77:58: - df:93:29:03:6c:ba:1c:13:d1:42:49:32:f1:38:09:d3:3e:43: - 89:1b:61:c4:40:f3:ac:4c:c1:36:2f:28:bd:57:a0:de:35:82: - c9:da:93:5f:09:d6:e8:5b:cd:15:45:b3:28:22:7d:48:00:c4: - 55:0f:f6:de:d9:c2:0a:39:5e:69:a4:50:9b:3f:e1:06:44:8a: - 13:af:0b:56:8d:70:c4:9f:d1:a2:b4:25:09:8b:19:47:e8:d2: - 98:49:2a:a0:8b:fe:8c:cb:23:d8:f8:e6:28:c6:d9:0b:10:7c: - d3:ce:48:07:8d:c7:56:bb:c9:e8:d7:a8:a1:24:93:bf:5f:d2: - 
a9:f1:35:b7:40:ad:08:bf:89:63:e5:49:40:13:e7:1e:6a:77: - 7f:9a:5b:07:0c:eb:80:77:b0:ac:fa:8a:9d:b8:83:53:a1:1e: - 0e:14:2b:c9:50:96:81:c2:c0:0b:d1:c6:b6:2e:ea:98:3e:7b: - ee:5f:09:f7 + 8d:b6:2c:5f:87:13:06:a8:66:ce:11:2a:2c:20:1e:c7:ee:50: + 75:a7:d1:7c:ad:c6:ec:d1:18:d0:fa:aa:00:fa:08:f9:0f:cc: + df:59:9a:6b:1c:18:07:15:84:d0:9a:24:8d:dd:46:79:9c:dc: + 9e:3e:97:10:24:b2:9d:d4:f6:c5:79:58:87:7c:a6:af:cf:69: + 23:fb:43:7a:0f:4d:26:e0:e9:66:c5:ad:fa:88:e2:c5:6e:6a: + ce:70:0c:8f:73:01:d6:fd:a9:1f:31:49:41:17:45:22:cc:a6: + 71:e4:f4:0f:0f:2e:3e:49:0b:5f:04:94:36:49:fa:72:42:c9: + 25:75:84:9a:dc:16:cb:69:44:44:e5:3a:ff:26:f6:44:42:4c: + 6c:e2:56:d6:3e:bc:f2:8b:83:de:e2:91:70:65:b9:d0:dd:a3: + d1:de:53:27:77:13:2d:86:27:c3:40:2f:c1:a5:50:1c:5a:44: + 51:b4:29:11:c3:30:9d:1a:96:25:7a:d6:05:70:ad:06:0d:f2: + 9b:b1:b6:82:39:06:c7:7c:b2:49:04:19:e4:7e:87:b8:d8:42: + 1d:ab:ed:d0:b0:7f:79:6b:89:75:2f:6a:26:67:3d:33:57:5f: + 5a:49:52:98:3b:2a:e5:43:d7:f9:97:ca:75:cd:6f:e9:e4:66: + b6:d6:c2:c7 -----BEGIN CERTIFICATE----- -MIIC6jCCAdICFAwmFd+PcR1qMdDar2TvgN6smkZ6MA0GCSqGSIb3DQEBCwUAMBEx -DzANBgNVBAMMBkNBUm9vdDAeFw0yMTA0MjMxNzA4NTFaFw0zMTA0MjExNzA4NTFa -MFIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEChMNQXBhY2hlIFB1 -bHNhcjEOMAwGA1UECxMFUHJveHkxDjAMBgNVBAMTBVByb3h5MIIBIjANBgkqhkiG -9w0BAQEFAAOCAQ8AMIIBCgKCAQEAw1zFrRfc9NTE6hxgWiRGE9nPwM2DLi+CcOXg -jTO9lbXPxvBU1Y29hw1ibB0/UmZ0/wYzHDzV7S5j2ZbG8ZiCx5RKvGTymzpU7IGZ -vBSCQ4cMa9oDjKoLQdf+J8T5iIE0sf8q4G3QR93BEaVUqVMyzY/2dViOBeTZsaxp -/rZUw602BKJ39VO2dIPVagHglrWir1CPtdedp8K9+DGGCV98CrLbNOGAJRdffW+L -3I7V+c/P9faPav4+lgDJVrDQ40beuaaKXpuOf+oZzKJbdSI8HTZI5PIaAZVhwfB6 -J52DlnTMqQRCCFM0mC6344P58qMp4SPE7aAc9irt3MDfl6nzjQIDAQABMA0GCSqG -SIb3DQEBCwUAA4IBAQB7J6gqVDV25finYI3nNRJpOPMyryUPaRqxr3nlfJRcj6p2 -lVQ1tLtkIBqRHrPk0QZyJMM1vZz2VGHZOSKZQgjUl6p9gkb8d1jfkykDbLocE9FC -STLxOAnTPkOJG2HEQPOsTME2Lyi9V6DeNYLJ2pNfCdboW80VRbMoIn1IAMRVD/be -2cIKOV5ppFCbP+EGRIoTrwtWjXDEn9GitCUJixlH6NKYSSqgi/6MyyPY+OYoxtkL 
-EHzTzkgHjcdWu8no16ihJJO/X9Kp8TW3QK0Iv4lj5UlAE+ceand/mlsHDOuAd7Cs -+oqduINToR4OFCvJUJaBwsAL0ca2LuqYPnvuXwn3 +MIIDDzCCAfegAwIBAgIUYeYbB5BqT/fNRrlZHT4cOQ3yXgQwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz +MzgyNFowUjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQKEw1BcGFj +aGUgUHVsc2FyMQ4wDAYDVQQLEwVQcm94eTEOMAwGA1UEAxMFUHJveHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDDXMWtF9z01MTqHGBaJEYT2c/AzYMu +L4Jw5eCNM72Vtc/G8FTVjb2HDWJsHT9SZnT/BjMcPNXtLmPZlsbxmILHlEq8ZPKb +OlTsgZm8FIJDhwxr2gOMqgtB1/4nxPmIgTSx/yrgbdBH3cERpVSpUzLNj/Z1WI4F +5NmxrGn+tlTDrTYEonf1U7Z0g9VqAeCWtaKvUI+1152nwr34MYYJX3wKsts04YAl +F199b4vcjtX5z8/19o9q/j6WAMlWsNDjRt65popem45/6hnMolt1IjwdNkjk8hoB +lWHB8HonnYOWdMypBEIIUzSYLrfjg/nyoynhI8TtoBz2Ku3cwN+XqfONAgMBAAGj +HjAcMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC +AQEAjbYsX4cTBqhmzhEqLCAex+5QdafRfK3G7NEY0PqqAPoI+Q/M31maaxwYBxWE +0Jokjd1GeZzcnj6XECSyndT2xXlYh3ymr89pI/tDeg9NJuDpZsWt+ojixW5qznAM +j3MB1v2pHzFJQRdFIsymceT0Dw8uPkkLXwSUNkn6ckLJJXWEmtwWy2lEROU6/yb2 +REJMbOJW1j688ouD3uKRcGW50N2j0d5TJ3cTLYYnw0AvwaVQHFpEUbQpEcMwnRqW +JXrWBXCtBg3ym7G2gjkGx3yySQQZ5H6HuNhCHavt0LB/eWuJdS9qJmc9M1dfWklS +mDsq5UPX+ZfKdc1v6eRmttbCxw== -----END CERTIFICATE----- diff --git a/pulsar-proxy/src/test/resources/authentication/tls/cacert.pem b/pulsar-proxy/src/test/resources/authentication/tls/cacert.pem index b607fb9d131bf..127f56dd777a5 100644 --- a/pulsar-proxy/src/test/resources/authentication/tls/cacert.pem +++ b/pulsar-proxy/src/test/resources/authentication/tls/cacert.pem @@ -2,76 +2,76 @@ Certificate: Data: Version: 3 (0x2) Serial Number: - 7f:c3:12:28:23:73:86:8e:bb:d6:e6:21:43:e3:72:e8:01:17:3e:d1 + 77:4f:f6:cf:99:ca:77:e8:a7:6e:1e:fd:e2:cf:ac:a9:da:68:d2:42 Signature Algorithm: sha256WithRSAEncryption Issuer: CN = CARoot Validity - Not Before: Apr 23 17:08:51 2021 GMT - Not After : Apr 21 17:08:51 2031 GMT + Not Before: May 30 13:38:24 2022 GMT + Not After : May 27 13:38:24 2032 GMT Subject: CN = CARoot Subject Public Key 
Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (2048 bit) Modulus: - 00:b3:6a:94:67:7c:33:90:4e:db:b9:94:b0:a6:1a: - 69:77:bb:33:31:fe:3c:8b:6d:8a:f1:cf:07:d9:87: - 86:ad:45:cf:4c:e3:e7:35:d5:4b:a3:76:27:9b:30: - b1:82:3f:57:29:c9:f0:be:25:49:25:16:64:58:cc: - b0:f1:01:2e:19:69:52:c8:38:64:61:16:b4:a7:ba: - 76:2b:54:e6:a5:80:6c:b6:6c:8a:3c:c1:06:c2:e1: - c1:f3:18:6b:87:08:4b:bb:54:f4:b3:72:1d:f2:ce: - 47:18:5f:82:d3:88:c9:39:7b:71:fc:71:1a:aa:7e: - 55:6c:35:7f:83:c1:60:e7:7d:b1:80:d0:17:7a:ed: - e7:0d:87:8b:59:e3:18:47:e9:cf:de:0d:0e:c6:3e: - 5c:eb:6e:f4:43:95:31:01:2d:e8:f2:ba:8a:bf:ed: - 82:0c:7c:14:14:13:0e:fb:ae:f0:3a:7c:29:ee:55: - 29:ca:46:7a:be:05:9f:fa:75:65:4c:f5:fb:cf:fe: - 92:8d:78:e2:e1:41:55:32:2c:36:a2:ac:96:43:aa: - e2:60:5a:ff:a6:e2:3f:5b:fc:d4:d3:af:cf:78:45: - b5:e7:6e:7d:b6:fa:c4:05:84:a6:49:a7:ac:16:8e: - b2:17:ac:75:76:f0:29:df:c8:da:a2:01:05:25:08: - 4d:8f + 00:b8:5e:c2:60:ed:c4:ee:3c:5b:ab:fc:64:52:f3: + 30:41:fc:10:5a:ac:a6:9b:0a:93:d0:d0:c9:bf:96: + 14:a7:cf:5c:3e:23:91:7e:54:ec:fe:2d:9f:c9:34: + d1:4e:95:2f:85:9c:cc:be:90:a3:a4:cb:4d:a4:72: + d2:84:e0:c7:42:c4:bf:70:b6:fa:d2:45:8b:83:66: + 1e:a4:e9:0e:06:a3:46:ea:a7:18:cd:33:b9:f1:ff: + 76:91:72:8f:cd:f9:93:43:c3:6e:17:1f:2d:86:df: + b6:fb:2d:d6:be:2d:98:ad:de:00:c7:de:f9:68:b5: + 40:40:56:49:ae:23:e5:a1:3b:5f:15:5a:44:50:da: + fb:02:d3:42:c6:87:0d:c0:8d:3a:e6:e2:aa:73:31: + ab:79:58:51:cd:03:80:f3:12:ce:2f:35:04:8b:39: + 5f:b0:cc:b8:41:99:47:c1:17:96:8b:c2:44:84:b5: + 21:8a:15:52:fe:1a:5a:f9:88:cc:11:17:ee:48:dd: + ba:bf:ed:67:6e:27:35:42:cf:07:5e:b1:8b:81:55: + 92:01:8e:61:fd:8e:82:74:b1:70:7a:3d:52:1f:16: + 78:12:bb:b5:09:62:ce:6d:18:4a:e9:f5:27:19:bc: + 93:4e:ed:dd:53:a8:c1:bb:48:b7:18:20:7b:79:48: + 48:9d Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Subject Key Identifier: - 09:93:47:8E:5F:F3:BD:19:A2:77:FD:09:BA:13:A9:B6:C6:75:4E:B0 + 0F:46:61:3E:6F:71:22:E6:1F:32:37:7C:B2:81:A6:CC:DB:9D:F5:7C X509v3 Authority Key Identifier: - 
keyid:09:93:47:8E:5F:F3:BD:19:A2:77:FD:09:BA:13:A9:B6:C6:75:4E:B0 + keyid:0F:46:61:3E:6F:71:22:E6:1F:32:37:7C:B2:81:A6:CC:DB:9D:F5:7C X509v3 Basic Constraints: critical CA:TRUE Signature Algorithm: sha256WithRSAEncryption - a1:52:44:1e:c0:a1:73:48:98:dd:91:b9:a7:e1:da:c5:48:65: - d2:6d:38:77:b5:fa:f6:f7:c5:e4:b7:51:28:ea:f1:6c:9e:82: - 80:6d:6f:56:9c:3b:31:b8:71:0e:ad:17:f9:8e:c6:7e:87:a9: - 5f:30:1c:0e:17:c8:c7:c2:3c:96:3d:7d:01:a9:ce:d0:cd:c3: - 55:6b:ce:64:35:53:93:c6:8c:4c:3d:0d:38:01:17:7b:e2:d8: - b3:a5:78:46:77:fc:7e:da:16:f8:96:d0:72:35:89:c3:15:8c: - 38:37:8b:7f:ff:01:f9:84:b2:e9:8d:11:64:82:36:e7:ef:86: - a6:de:11:d9:78:b4:07:6c:18:89:aa:d6:6d:a2:d8:24:98:40: - 85:5d:ba:5c:36:75:ad:e8:25:03:2d:94:69:d1:ce:d9:8f:9b: - fd:79:5d:4b:30:7a:de:18:08:5a:54:e9:7b:7d:e2:cb:20:65: - 99:4c:5a:31:de:c8:2c:01:b1:c8:d1:30:1d:33:bd:ef:9b:43: - 4d:ac:7d:20:1f:c3:10:53:2e:1a:99:d5:6c:62:0e:15:b3:bd: - 3c:88:58:88:0c:4f:06:21:b7:a4:8c:eb:9f:63:2e:5e:1d:c8: - 91:39:9a:2b:e3:bf:e4:0a:bd:6e:4d:71:15:4d:e1:af:01:15: - 99:38:25:12 + 91:e8:d8:c4:32:2e:80:5c:d4:cb:24:7a:81:43:a9:c7:95:90: + 1a:2e:7a:d3:0c:5d:b6:21:05:67:4d:98:5a:0d:71:ea:80:01: + 95:42:fe:fa:f1:7c:dc:bd:76:ff:05:26:3b:f0:94:b3:09:2c: + 34:dd:43:56:46:2b:15:35:99:d9:94:54:22:cf:a6:68:b0:d1: + 79:e2:f0:9f:0b:02:7c:cf:1f:bd:d0:f6:49:c6:82:28:a5:c6: + ae:94:65:cf:fd:ad:a8:6c:c2:17:da:db:f3:be:30:1a:1b:b4: + 2c:fa:08:71:9d:64:09:45:02:92:02:ad:eb:15:47:14:43:5b: + a8:2d:1a:ec:14:93:dc:ff:bb:51:33:a3:d5:4d:e2:77:ca:e1: + a5:98:5c:7a:b6:10:19:d3:d7:f5:14:a5:d5:08:f1:97:18:3d: + 5f:a6:4e:a2:4a:0d:4b:d4:bb:56:6b:a8:44:35:62:c5:d8:c6: + 67:11:93:1c:22:64:3e:aa:15:08:dc:87:39:dd:f6:e0:a0:d5: + 00:db:27:79:3d:f4:35:7c:46:a9:fa:0c:fa:fc:74:f5:bf:f4: + fe:71:40:45:33:22:35:83:f7:1a:96:2a:fc:b2:33:e0:1a:e8: + 24:48:91:5d:90:5c:4c:93:33:4c:40:de:26:bb:24:ac:48:9b: + ae:fe:19:34 -----BEGIN CERTIFICATE----- -MIIDAzCCAeugAwIBAgIUf8MSKCNzho671uYhQ+Ny6AEXPtEwDQYJKoZIhvcNAQEL 
-BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIxMDQyMzE3MDg1MVoXDTMxMDQyMTE3 -MDg1MVowETEPMA0GA1UEAwwGQ0FSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAs2qUZ3wzkE7buZSwphppd7szMf48i22K8c8H2YeGrUXPTOPnNdVL -o3YnmzCxgj9XKcnwviVJJRZkWMyw8QEuGWlSyDhkYRa0p7p2K1TmpYBstmyKPMEG -wuHB8xhrhwhLu1T0s3Id8s5HGF+C04jJOXtx/HEaqn5VbDV/g8Fg532xgNAXeu3n -DYeLWeMYR+nP3g0Oxj5c6270Q5UxAS3o8rqKv+2CDHwUFBMO+67wOnwp7lUpykZ6 -vgWf+nVlTPX7z/6SjXji4UFVMiw2oqyWQ6riYFr/puI/W/zU06/PeEW15259tvrE -BYSmSaesFo6yF6x1dvAp38jaogEFJQhNjwIDAQABo1MwUTAdBgNVHQ4EFgQUCZNH -jl/zvRmid/0JuhOptsZ1TrAwHwYDVR0jBBgwFoAUCZNHjl/zvRmid/0JuhOptsZ1 -TrAwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAoVJEHsChc0iY -3ZG5p+HaxUhl0m04d7X69vfF5LdRKOrxbJ6CgG1vVpw7MbhxDq0X+Y7GfoepXzAc -DhfIx8I8lj19AanO0M3DVWvOZDVTk8aMTD0NOAEXe+LYs6V4Rnf8ftoW+JbQcjWJ -wxWMODeLf/8B+YSy6Y0RZII25++Gpt4R2Xi0B2wYiarWbaLYJJhAhV26XDZ1regl -Ay2UadHO2Y+b/XldSzB63hgIWlTpe33iyyBlmUxaMd7ILAGxyNEwHTO975tDTax9 -IB/DEFMuGpnVbGIOFbO9PIhYiAxPBiG3pIzrn2MuXh3IkTmaK+O/5Aq9bk1xFU3h -rwEVmTglEg== +MIIDAzCCAeugAwIBAgIUd0/2z5nKd+inbh794s+sqdpo0kIwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz +MzgyNFowETEPMA0GA1UEAwwGQ0FSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAuF7CYO3E7jxbq/xkUvMwQfwQWqymmwqT0NDJv5YUp89cPiORflTs +/i2fyTTRTpUvhZzMvpCjpMtNpHLShODHQsS/cLb60kWLg2YepOkOBqNG6qcYzTO5 +8f92kXKPzfmTQ8NuFx8tht+2+y3Wvi2Yrd4Ax975aLVAQFZJriPloTtfFVpEUNr7 +AtNCxocNwI065uKqczGreVhRzQOA8xLOLzUEizlfsMy4QZlHwReWi8JEhLUhihVS +/hpa+YjMERfuSN26v+1nbic1Qs8HXrGLgVWSAY5h/Y6CdLFwej1SHxZ4Eru1CWLO +bRhK6fUnGbyTTu3dU6jBu0i3GCB7eUhInQIDAQABo1MwUTAdBgNVHQ4EFgQUD0Zh +Pm9xIuYfMjd8soGmzNud9XwwHwYDVR0jBBgwFoAUD0ZhPm9xIuYfMjd8soGmzNud +9XwwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAkejYxDIugFzU +yyR6gUOpx5WQGi560wxdtiEFZ02YWg1x6oABlUL++vF83L12/wUmO/CUswksNN1D +VkYrFTWZ2ZRUIs+maLDReeLwnwsCfM8fvdD2ScaCKKXGrpRlz/2tqGzCF9rb874w +Ghu0LPoIcZ1kCUUCkgKt6xVHFENbqC0a7BST3P+7UTOj1U3id8rhpZhcerYQGdPX +9RSl1Qjxlxg9X6ZOokoNS9S7VmuoRDVixdjGZxGTHCJkPqoVCNyHOd324KDVANsn 
+eT30NXxGqfoM+vx09b/0/nFARTMiNYP3GpYq/LIz4BroJEiRXZBcTJMzTEDeJrsk +rEibrv4ZNA== -----END CERTIFICATE----- diff --git a/pulsar-proxy/src/test/resources/authentication/tls/client-cert.pem b/pulsar-proxy/src/test/resources/authentication/tls/client-cert.pem index 0fc458dbe5363..192d686246f1a 100644 --- a/pulsar-proxy/src/test/resources/authentication/tls/client-cert.pem +++ b/pulsar-proxy/src/test/resources/authentication/tls/client-cert.pem @@ -1,13 +1,13 @@ Certificate: Data: - Version: 1 (0x0) + Version: 3 (0x2) Serial Number: - 0c:26:15:df:8f:71:1d:6a:31:d0:da:af:64:ef:80:de:ac:9a:46:74 + 61:e6:1b:07:90:6a:4f:f7:cd:46:b9:59:1d:3e:1c:39:0d:f2:5e:01 Signature Algorithm: sha256WithRSAEncryption Issuer: CN = CARoot Validity - Not Before: Apr 23 17:08:51 2021 GMT - Not After : Apr 21 17:08:51 2031 GMT + Not Before: May 30 13:38:24 2022 GMT + Not After : May 27 13:38:24 2032 GMT Subject: C = US, ST = CA, O = Apache, OU = Apache Pulsar, CN = superUser Subject Public Key Info: Public Key Algorithm: rsaEncryption @@ -32,37 +32,41 @@ Certificate: b6:98:ef:dd:03:82:58:a3:32:dc:90:a1:b6:a6:1e: e1:0b Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 Signature Algorithm: sha256WithRSAEncryption - 33:40:2a:38:48:99:a0:fe:68:4d:07:3b:08:ae:af:a1:7c:ea: - 70:ab:a7:c8:32:b4:ff:9f:5a:51:3b:2b:a2:aa:21:75:44:7d: - be:e7:fb:08:b9:81:e5:4c:cf:01:86:f9:06:63:4f:ce:7a:1d: - cb:1e:9e:8f:d5:0a:54:53:69:91:05:10:2c:b0:4f:d4:3a:b5: - 25:0e:25:4c:eb:67:64:d7:85:29:77:63:30:da:2a:77:3f:59: - c2:8c:e9:02:57:49:93:3a:51:91:1a:b2:59:4d:d5:69:c9:9d: - cc:e2:4f:b2:6c:5b:ba:45:68:c7:f5:18:f4:1d:b8:0c:eb:fd: - 0a:cf:10:5d:dc:3e:26:49:03:33:37:40:f7:96:88:82:99:5c: - 38:8d:cc:3b:de:b5:b9:ee:f9:ac:ae:ce:03:9a:1e:a7:f8:02: - 73:2e:af:e7:b0:22:cb:3d:a3:ca:85:16:e9:e6:e2:d6:bf:1c: - 1a:4c:ea:14:49:52:84:67:38:97:c7:b3:30:72:cc:c6:08:e5: - 40:0a:87:da:19:98:26:4f:0b:54:43:a2:a0:ea:51:b2:23:88: - 
d2:b4:0e:82:4f:02:92:a4:fb:27:e2:06:15:76:e7:27:f2:a2: - e4:23:7b:24:ca:e6:80:93:2b:cd:54:ca:1b:9b:fd:d9:59:d1: - 96:31:25:7b + 96:c2:23:2d:46:d0:3d:23:0e:ab:3d:b6:1e:31:96:00:eb:ae: + 17:ac:6e:c0:d4:1a:8d:0f:36:63:27:02:49:4e:24:cf:d3:80: + 88:3a:4f:d0:f1:e5:1c:df:2d:8a:ab:ae:8d:48:77:a0:d0:dc: + d5:80:1c:a1:3d:0d:49:64:bf:cb:39:84:c9:f3:5d:e0:2d:ba: + a0:f2:ac:03:85:44:a1:97:6b:0b:de:ed:a7:49:19:46:b2:18: + 49:21:62:43:52:36:6f:47:6c:21:6b:5e:41:85:28:71:6c:22: + 27:35:76:82:ed:ac:ad:d7:fa:9d:4c:7d:6f:44:7e:06:dd:8a: + 11:32:0c:d9:d0:f6:63:2a:40:ae:0d:5a:df:9e:d7:91:8a:db: + 2d:95:f3:19:f0:8f:1e:34:e3:b2:31:67:38:74:fd:3f:e6:49: + 5e:53:eb:88:ae:b1:45:71:0e:67:97:3c:99:4e:c7:ea:1e:02: + 67:b4:54:ef:4f:10:55:4a:70:c0:eb:41:e4:50:d4:48:5e:70: + c5:0f:79:f2:06:3d:35:ea:ce:5d:13:8e:14:65:fc:98:21:16: + 2d:5d:6d:f8:e0:6b:c7:c6:e4:8a:ca:c9:38:1f:93:27:86:28: + ef:96:e7:ad:6c:4a:9e:10:78:48:00:f4:4a:43:dc:87:1d:e3: + d3:39:53:68 -----BEGIN CERTIFICATE----- -MIIC7zCCAdcCFAwmFd+PcR1qMdDar2TvgN6smkZ0MA0GCSqGSIb3DQEBCwUAMBEx -DzANBgNVBAMMBkNBUm9vdDAeFw0yMTA0MjMxNzA4NTFaFw0zMTA0MjExNzA4NTFa -MFcxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEPMA0GA1UEChMGQXBhY2hlMRYw -FAYDVQQLEw1BcGFjaGUgUHVsc2FyMRIwEAYDVQQDEwlzdXBlclVzZXIwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNQ32YQPmwW7yu28ALrSaQluBiOO1o -sXBGO95E+RRRhhDrypDniOj5kYXg3bW0FLl444bVVG1o7BSStPgiWwU97TElZQgF -hMrmDCESWDLHGmCjT9JKnigZfEWEAIyJ3N6K5U+Ikcyk8YFFTH3C/+LBicYSc5Xi -Nr3brotaaGqQUd4riF+qZ/So42PcvhmCzJ1/5o37gr4iAT1WEztbBLToxRjmLg36 -ukqN6MZaoVGaSmLXr920/OLVza6ZbFxhVgvXDBp3XPU6alS1njOsqXUomnav0HpX -ABuREzH9QoghRwUQAS9Zu8c62eFYTBtscbaY790DglijMtyQobamHuELAgMBAAEw -DQYJKoZIhvcNAQELBQADggEBADNAKjhImaD+aE0HOwiur6F86nCrp8gytP+fWlE7 -K6KqIXVEfb7n+wi5geVMzwGG+QZjT856Hcseno/VClRTaZEFECywT9Q6tSUOJUzr -Z2TXhSl3YzDaKnc/WcKM6QJXSZM6UZEasllN1WnJncziT7JsW7pFaMf1GPQduAzr -/QrPEF3cPiZJAzM3QPeWiIKZXDiNzDvetbnu+ayuzgOaHqf4AnMur+ewIss9o8qF -Funm4ta/HBpM6hRJUoRnOJfHszByzMYI5UAKh9oZmCZPC1RDoqDqUbIjiNK0DoJP 
-ApKk+yfiBhV25yfyouQjeyTK5oCTK81Uyhub/dlZ0ZYxJXs= +MIIDFDCCAfygAwIBAgIUYeYbB5BqT/fNRrlZHT4cOQ3yXgEwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz +MzgyNFowVzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQ8wDQYDVQQKEwZBcGFj +aGUxFjAUBgNVBAsTDUFwYWNoZSBQdWxzYXIxEjAQBgNVBAMTCXN1cGVyVXNlcjCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM1DfZhA+bBbvK7bwAutJpCW +4GI47WixcEY73kT5FFGGEOvKkOeI6PmRheDdtbQUuXjjhtVUbWjsFJK0+CJbBT3t +MSVlCAWEyuYMIRJYMscaYKNP0kqeKBl8RYQAjInc3orlT4iRzKTxgUVMfcL/4sGJ +xhJzleI2vduui1poapBR3iuIX6pn9KjjY9y+GYLMnX/mjfuCviIBPVYTO1sEtOjF +GOYuDfq6So3oxlqhUZpKYtev3bT84tXNrplsXGFWC9cMGndc9TpqVLWeM6ypdSia +dq/QelcAG5ETMf1CiCFHBRABL1m7xzrZ4VhMG2xxtpjv3QOCWKMy3JChtqYe4QsC +AwEAAaMeMBwwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB +CwUAA4IBAQCWwiMtRtA9Iw6rPbYeMZYA664XrG7A1BqNDzZjJwJJTiTP04CIOk/Q +8eUc3y2Kq66NSHeg0NzVgByhPQ1JZL/LOYTJ813gLbqg8qwDhUShl2sL3u2nSRlG +shhJIWJDUjZvR2wha15BhShxbCInNXaC7ayt1/qdTH1vRH4G3YoRMgzZ0PZjKkCu +DVrfnteRitstlfMZ8I8eNOOyMWc4dP0/5kleU+uIrrFFcQ5nlzyZTsfqHgJntFTv +TxBVSnDA60HkUNRIXnDFD3nyBj016s5dE44UZfyYIRYtXW344GvHxuSKysk4H5Mn +hijvluetbEqeEHhIAPRKQ9yHHePTOVNo -----END CERTIFICATE----- diff --git a/pulsar-proxy/src/test/resources/authentication/tls/server-cert.pem b/pulsar-proxy/src/test/resources/authentication/tls/server-cert.pem index 0f8bc17b9ed76..c09434c85d20a 100644 --- a/pulsar-proxy/src/test/resources/authentication/tls/server-cert.pem +++ b/pulsar-proxy/src/test/resources/authentication/tls/server-cert.pem @@ -1,13 +1,13 @@ Certificate: Data: - Version: 1 (0x0) + Version: 3 (0x2) Serial Number: - 0c:26:15:df:8f:71:1d:6a:31:d0:da:af:64:ef:80:de:ac:9a:46:75 + 61:e6:1b:07:90:6a:4f:f7:cd:46:b9:59:1d:3e:1c:39:0d:f2:5e:02 Signature Algorithm: sha256WithRSAEncryption Issuer: CN = CARoot Validity - Not Before: Apr 23 17:08:51 2021 GMT - Not After : Apr 21 17:08:51 2031 GMT + Not Before: May 30 13:38:24 2022 GMT + Not After : May 27 13:38:24 2032 GMT Subject: C = US, ST = CA, O = Apache, OU = Apache 
Pulsar, CN = localhost Subject Public Key Info: Public Key Algorithm: rsaEncryption @@ -32,37 +32,41 @@ Certificate: a0:1a:81:9d:d2:e1:66:dd:c4:cc:fc:63:04:ac:ec: a7:35 Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 Signature Algorithm: sha256WithRSAEncryption - 81:a7:27:69:49:e6:1b:c0:f2:a6:10:c2:ef:c7:64:27:69:53: - 3c:bd:8e:7c:b7:b8:bd:2a:02:d4:ab:4b:f3:7b:25:e8:1e:d8: - 3d:88:00:04:6c:a0:da:67:57:65:5d:a2:b6:1d:9a:8c:c7:bd: - 27:53:78:6a:61:3f:61:c1:23:d5:34:65:f1:49:ec:20:5d:f1: - 01:90:99:e8:e6:99:17:ae:c3:ed:e5:da:c4:f1:8c:89:e8:38: - c1:01:e0:84:27:bf:01:f5:ee:62:87:55:6c:63:fc:45:12:d3: - 2f:f7:e2:b9:f0:33:d0:84:1e:6b:23:7b:3e:ae:25:f6:ff:11: - 12:f4:12:63:b6:88:5d:01:aa:ce:c9:e4:d8:78:a2:2d:4c:9a: - 50:4d:57:80:6a:4b:2d:19:4c:61:21:6a:7a:06:2b:cf:82:ae: - f3:61:b0:ef:62:ae:3b:2d:2d:0d:c8:da:75:49:72:5a:1c:8b: - 15:c2:bb:07:5b:37:81:f6:42:e4:84:29:4c:cb:fc:4d:e1:86: - 9b:86:af:1f:03:08:58:b0:15:4c:72:fd:e6:62:e2:b2:37:ca: - eb:a4:67:ec:12:8f:95:57:d7:e7:cf:fe:b5:f9:4a:55:66:c4: - 2f:af:e9:65:a9:54:a8:9d:1a:1e:9a:9e:ec:60:bf:b5:ef:2b: - b6:d5:02:e9 + 88:89:d7:52:b3:61:49:73:7d:ee:aa:6f:47:11:cd:52:f1:ef: + 9a:63:5f:43:a9:4f:66:c8:36:dd:44:24:ba:4f:c3:6c:94:90: + 85:5e:29:fb:65:cf:03:3b:37:16:5e:88:07:70:97:54:93:f0: + f3:09:d7:65:60:09:00:fd:7f:dd:6a:ab:25:3a:30:c4:89:34: + 43:82:f6:f5:f4:2d:39:3d:21:90:c4:00:27:c5:6a:23:41:20: + c6:42:35:56:91:17:fa:31:90:09:6a:4c:e4:a7:53:ae:61:b6: + d3:5b:82:71:08:d0:0b:af:34:0f:9b:bd:bc:8c:1c:31:43:43: + 97:82:9a:ac:2a:53:ca:11:ce:6f:64:ac:86:c1:f0:62:14:aa: + c3:dd:15:5b:1c:02:6f:bb:40:87:17:b7:e5:9d:93:9a:51:c9: + 1e:7a:8c:d1:22:75:44:f1:9d:90:4b:3e:1f:6c:ab:6f:e3:be: + cd:c7:15:9d:04:84:4a:1b:a7:ac:64:5d:d7:3e:23:98:b9:49: + dd:85:dd:80:4c:46:08:9b:f5:df:eb:19:c8:57:70:ac:43:f9: + d6:9c:1b:1b:2a:94:cf:c1:35:56:a2:f4:b1:00:5d:9e:1e:36: + 54:72:ab:aa:ef:49:b2:f0:dc:cf:5b:22:51:bf:e4:c9:57:dc: + d0:48:0d:f2 -----BEGIN CERTIFICATE----- 
-MIIC7zCCAdcCFAwmFd+PcR1qMdDar2TvgN6smkZ1MA0GCSqGSIb3DQEBCwUAMBEx -DzANBgNVBAMMBkNBUm9vdDAeFw0yMTA0MjMxNzA4NTFaFw0zMTA0MjExNzA4NTFa -MFcxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEPMA0GA1UEChMGQXBhY2hlMRYw -FAYDVQQLEw1BcGFjaGUgUHVsc2FyMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvv7ctmK2d9tqjE9RiD5i+HKKJIrpv -1f0fZ+ORA5iAgQ7t2PZwfyw2aD1T6lg6ptWJZku9HldxE21LEeVApXaEJJJAWICW -yR8sxFXro3lzcFw3montL7pr44J8aUoCVIuBXjy/TIrL6ixeg+e3EAhfglijidHa -kroqKO4wKD9brhBxlsfhEsWwGq1Eb0Q6EUqaPA+NBoB7NO8/bPRexURUHsjdx4CF -gNlo5sZTA3fh/hhhB3cFTO1ZvF1BOGrvXaGyYJjUSCiVAooO/c97G9IRzBAMUHPX -zDhsg915JqqQyJuEhrxZ6WJp9JgbxIB4fqAagZ3S4WbdxMz8YwSs7Kc1AgMBAAEw -DQYJKoZIhvcNAQELBQADggEBAIGnJ2lJ5hvA8qYQwu/HZCdpUzy9jny3uL0qAtSr -S/N7Jege2D2IAARsoNpnV2VdorYdmozHvSdTeGphP2HBI9U0ZfFJ7CBd8QGQmejm -mReuw+3l2sTxjInoOMEB4IQnvwH17mKHVWxj/EUS0y/34rnwM9CEHmsjez6uJfb/ -ERL0EmO2iF0Bqs7J5Nh4oi1MmlBNV4BqSy0ZTGEhanoGK8+CrvNhsO9irjstLQ3I -2nVJclocixXCuwdbN4H2QuSEKUzL/E3hhpuGrx8DCFiwFUxy/eZi4rI3yuukZ+wS -j5VX1+fP/rX5SlVmxC+v6WWpVKidGh6anuxgv7XvK7bVAuk= +MIIDFDCCAfygAwIBAgIUYeYbB5BqT/fNRrlZHT4cOQ3yXgIwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGQ0FSb290MB4XDTIyMDUzMDEzMzgyNFoXDTMyMDUyNzEz +MzgyNFowVzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQ8wDQYDVQQKEwZBcGFj +aGUxFjAUBgNVBAsTDUFwYWNoZSBQdWxzYXIxEjAQBgNVBAMTCWxvY2FsaG9zdDCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK+/ty2YrZ322qMT1GIPmL4c +ookium/V/R9n45EDmICBDu3Y9nB/LDZoPVPqWDqm1YlmS70eV3ETbUsR5UCldoQk +kkBYgJbJHyzEVeujeXNwXDeaie0vumvjgnxpSgJUi4FePL9MisvqLF6D57cQCF+C +WKOJ0dqSuioo7jAoP1uuEHGWx+ESxbAarURvRDoRSpo8D40GgHs07z9s9F7FRFQe +yN3HgIWA2WjmxlMDd+H+GGEHdwVM7Vm8XUE4au9dobJgmNRIKJUCig79z3sb0hHM +EAxQc9fMOGyD3XkmqpDIm4SGvFnpYmn0mBvEgHh+oBqBndLhZt3EzPxjBKzspzUC +AwEAAaMeMBwwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB +CwUAA4IBAQCIiddSs2FJc33uqm9HEc1S8e+aY19DqU9myDbdRCS6T8NslJCFXin7 +Zc8DOzcWXogHcJdUk/DzCddlYAkA/X/daqslOjDEiTRDgvb19C05PSGQxAAnxWoj +QSDGQjVWkRf6MZAJakzkp1OuYbbTW4JxCNALrzQPm728jBwxQ0OXgpqsKlPKEc5v 
+ZKyGwfBiFKrD3RVbHAJvu0CHF7flnZOaUckeeozRInVE8Z2QSz4fbKtv477NxxWd +BIRKG6esZF3XPiOYuUndhd2ATEYIm/Xf6xnIV3CsQ/nWnBsbKpTPwTVWovSxAF2e +HjZUcquq70my8NzPWyJRv+TJV9zQSA3y -----END CERTIFICATE----- diff --git a/pulsar-sql/java-version-trim-agent/pom.xml b/pulsar-sql/java-version-trim-agent/pom.xml new file mode 100644 index 0000000000000..f564229b33013 --- /dev/null +++ b/pulsar-sql/java-version-trim-agent/pom.xml @@ -0,0 +1,58 @@ + + + + pulsar-sql + org.apache.pulsar + 2.9.3 + + 4.0.0 + + java-version-trim-agent + Pulsar SQL :: Java Version Trim Agent + + + java-version-trim-agent + + + org.apache.maven.plugins + maven-jar-plugin + 3.1.0 + + + + true + + + org.apache.pulsar.sql.agent.TrimJavaVersionAgent + org.apache.pulsar.sql.agent.TrimJavaVersionAgent + true + true + + + + + + + + \ No newline at end of file diff --git a/pulsar-sql/java-version-trim-agent/src/main/java/org/apache/pulsar/sql/agent/TrimJavaVersionAgent.java b/pulsar-sql/java-version-trim-agent/src/main/java/org/apache/pulsar/sql/agent/TrimJavaVersionAgent.java new file mode 100644 index 0000000000000..7cb422bd27a13 --- /dev/null +++ b/pulsar-sql/java-version-trim-agent/src/main/java/org/apache/pulsar/sql/agent/TrimJavaVersionAgent.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.sql.agent; + +import java.lang.instrument.Instrumentation; +import java.util.logging.Logger; + +/** + * The presto 332 couldn't parse Java version like this `11.0.14.1`, + * so add java version trim agent to walk around the problem. + * + * After the presto upgrade to 332+, we could remove this. + */ +public class TrimJavaVersionAgent { + + private static final Logger logger = Logger.getLogger(TrimJavaVersionAgent.class.getName()); + + private static final String JAVA_VERSION = "java.version"; + + public static String trimJavaVersion(String javaVersion) { + String[] arr = javaVersion.split("\\."); + if (arr.length <= 3) { + return javaVersion; + } + return arr[0] + "." + arr[1] + "." + arr[2]; + } + + public static void premain(String agentArgs, Instrumentation inst) { + String javaVersion = System.getProperty(JAVA_VERSION); + String trimVersion = trimJavaVersion(javaVersion); + logger.info("original java version " + javaVersion + " => trim java version " + trimVersion); + System.setProperty(JAVA_VERSION, trimVersion); + } + +} diff --git a/pulsar-sql/java-version-trim-agent/src/main/java/org/apache/pulsar/sql/agent/package-info.java b/pulsar-sql/java-version-trim-agent/src/main/java/org/apache/pulsar/sql/agent/package-info.java new file mode 100644 index 0000000000000..7d60a6fc6fbb2 --- /dev/null +++ b/pulsar-sql/java-version-trim-agent/src/main/java/org/apache/pulsar/sql/agent/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/** + * Implementation of the connector to the Presto engine. + */ +package org.apache.pulsar.sql.agent; diff --git a/pulsar-sql/pom.xml b/pulsar-sql/pom.xml index 296d6f1330de1..14648f39852a9 100644 --- a/pulsar-sql/pom.xml +++ b/pulsar-sql/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 pulsar-sql @@ -34,75 +34,51 @@ presto-pulsar presto-pulsar-plugin + java-version-trim-agent presto-distribution + + + 3.14.9 + + 1.17.2 + + + - com.fasterxml.jackson.jaxrs - jackson-jaxrs-json-provider - ${jackson.version} - - - - com.fasterxml.jackson.core - jackson-core - ${jackson.version} - - - - com.fasterxml.jackson.core - jackson-databind - ${jackson.databind.version} - - - - com.fasterxml.jackson.core - jackson-annotations - ${jackson.version} + com.squareup.okhttp3 + okhttp + ${okhttp3.version} - - - com.fasterxml.jackson.jaxrs - jackson-jaxrs-base - ${jackson.version} - - - - com.fasterxml.jackson.datatype - jackson-datatype-joda - ${jackson.version} - - - com.fasterxml.jackson.dataformat - jackson-dataformat-yaml - ${jackson.version} + com.squareup.okhttp3 + okhttp-urlconnection + ${okhttp3.version} - - com.fasterxml.jackson.module - jackson-module-jsonSchema - ${jackson.version} + com.squareup.okhttp3 + logging-interceptor + ${okhttp3.version} - - com.fasterxml.jackson.datatype - jackson-datatype-guava - ${jackson.version} + com.squareup.okio + okio + ${okio.version} + - com.fasterxml.jackson.datatype - jackson-datatype-jdk8 - ${jackson.version} + org.slf4j + log4j-over-slf4j + ${slf4j.version} - - 
com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - ${jackson.version} + org.slf4j + slf4j-jdk14 + ${slf4j.version} diff --git a/pulsar-sql/presto-distribution/LICENSE b/pulsar-sql/presto-distribution/LICENSE index d35661fa22a4b..39ff98aff3b2c 100644 --- a/pulsar-sql/presto-distribution/LICENSE +++ b/pulsar-sql/presto-distribution/LICENSE @@ -207,68 +207,74 @@ This projects includes binary packages with the following licenses: The Apache Software License, Version 2.0 * Jackson - - jackson-annotations-2.12.3.jar - - jackson-core-2.12.3.jar - - jackson-databind-2.12.3.jar - - jackson-dataformat-smile-2.12.3.jar - - jackson-datatype-guava-2.12.3.jar - - jackson-datatype-jdk8-2.12.3.jar - - jackson-datatype-joda-2.12.3.jar - - jackson-datatype-jsr310-2.12.3.jar - - jackson-dataformat-yaml-2.12.3.jar - - jackson-jaxrs-base-2.12.3.jar - - jackson-jaxrs-json-provider-2.12.3.jar - - jackson-module-jaxb-annotations-2.12.3.jar - - jackson-module-jsonSchema-2.12.3.jar + - jackson-annotations-2.13.4.jar + - jackson-core-2.13.4.jar + - jackson-databind-2.13.4.jar + - jackson-dataformat-smile-2.13.4.jar + - jackson-datatype-guava-2.13.4.jar + - jackson-datatype-jdk8-2.13.4.jar + - jackson-datatype-joda-2.13.4.jar + - jackson-datatype-jsr310-2.13.4.jar + - jackson-dataformat-yaml-2.13.4.jar + - jackson-jaxrs-base-2.13.4.jar + - jackson-jaxrs-json-provider-2.13.4.jar + - jackson-module-jaxb-annotations-2.13.4.jar + - jackson-module-jsonSchema-2.13.4.jar * Guava - guava-30.1-jre.jar - listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar - failureaccess-1.0.1.jar * Google Guice - - guice-4.2.3.jar - - guice-multibindings-4.2.0.jar + - guice-5.1.0.jar * Apache Commons - commons-math3-3.6.1.jar - commons-compress-1.21.jar - commons-lang3-3.11.jar * Netty - netty-3.10.6.Final.jar - - netty-buffer-4.1.68.Final.jar - - netty-codec-4.1.68.Final.jar - - netty-codec-dns-4.1.68.Final.jar - - netty-codec-http-4.1.68.Final.jar - - netty-codec-haproxy-4.1.68.Final.jar - - 
netty-codec-socks-4.1.68.Final.jar - - netty-handler-proxy-4.1.68.Final.jar - - netty-common-4.1.68.Final.jar - - netty-handler-4.1.68.Final.jar - - netty-reactive-streams-2.0.4.jar - - netty-resolver-4.1.68.Final.jar - - netty-resolver-dns-4.1.68.Final.jar - - netty-tcnative-boringssl-static-2.0.42.Final.jar - - netty-transport-4.1.68.Final.jar - - netty-transport-native-epoll-4.1.68.Final-linux-x86_64.jar - - netty-transport-native-unix-common-4.1.68.Final.jar - - netty-transport-native-unix-common-4.1.68.Final-linux-x86_64.jar + - netty-buffer-4.1.77.Final.jar + - netty-codec-4.1.77.Final.jar + - netty-codec-dns-4.1.77.Final.jar + - netty-codec-http-4.1.77.Final.jar + - netty-codec-haproxy-4.1.77.Final.jar + - netty-codec-socks-4.1.77.Final.jar + - netty-handler-proxy-4.1.77.Final.jar + - netty-common-4.1.77.Final.jar + - netty-handler-4.1.77.Final.jar + - netty-reactive-streams-2.0.6.jar + - netty-resolver-4.1.77.Final.jar + - netty-resolver-dns-4.1.77.Final.jar + - netty-tcnative-boringssl-static-2.0.52.Final.jar + - netty-tcnative-boringssl-static-2.0.52.Final-linux-aarch_64.jar + - netty-tcnative-boringssl-static-2.0.52.Final-linux-x86_64.jar + - netty-tcnative-boringssl-static-2.0.52.Final-osx-aarch_64.jar + - netty-tcnative-boringssl-static-2.0.52.Final-osx-x86_64.jar + - netty-tcnative-boringssl-static-2.0.52.Final-windows-x86_64.jar + - netty-tcnative-classes-2.0.52.Final.jar + - netty-transport-4.1.77.Final.jar + - netty-transport-classes-epoll-4.1.77.Final.jar + - netty-transport-native-epoll-4.1.77.Final-linux-x86_64.jar + - netty-transport-native-unix-common-4.1.77.Final.jar + - netty-transport-native-unix-common-4.1.77.Final-linux-x86_64.jar * Joda Time - joda-time-2.10.5.jar * Jetty - - http2-client-9.4.43.v20210629.jar - - http2-common-9.4.43.v20210629.jar - - http2-hpack-9.4.43.v20210629.jar - - http2-http-client-transport-9.4.43.v20210629.jar - - jetty-alpn-client-9.4.43.v20210629.jar - - http2-server-9.4.43.v20210629.jar - - 
jetty-alpn-java-client-9.4.43.v20210629.jar - - jetty-client-9.4.43.v20210629.jar - - jetty-http-9.4.43.v20210629.jar - - jetty-io-9.4.43.v20210629.jar - - jetty-jmx-9.4.43.v20210629.jar - - jetty-security-9.4.43.v20210629.jar - - jetty-server-9.4.43.v20210629.jar - - jetty-servlet-9.4.43.v20210629.jar - - jetty-util-9.4.43.v20210629.jar - - jetty-util-ajax-9.4.43.v20210629.jar + - http2-client-9.4.48.v20220622.jar + - http2-common-9.4.48.v20220622.jar + - http2-hpack-9.4.48.v20220622.jar + - http2-http-client-transport-9.4.48.v20220622.jar + - jetty-alpn-client-9.4.48.v20220622.jar + - http2-server-9.4.48.v20220622.jar + - jetty-alpn-java-client-9.4.48.v20220622.jar + - jetty-client-9.4.48.v20220622.jar + - jetty-http-9.4.48.v20220622.jar + - jetty-io-9.4.48.v20220622.jar + - jetty-jmx-9.4.48.v20220622.jar + - jetty-security-9.4.48.v20220622.jar + - jetty-server-9.4.48.v20220622.jar + - jetty-servlet-9.4.48.v20220622.jar + - jetty-util-9.4.48.v20220622.jar + - jetty-util-ajax-9.4.48.v20220622.jar * Apache BVal - bval-jsr-2.0.0.jar * Bytecode @@ -327,8 +333,7 @@ The Apache Software License, Version 2.0 - leveldb-0.10.jar - leveldb-api-0.10.jar * Log4j implemented over SLF4J - - log4j-over-slf4j-1.7.29.jar - - log4j-over-slf4j-1.7.30.jar + - log4j-over-slf4j-1.7.32.jar * Lucene Common Analyzers - lucene-analyzers-common-8.4.1.jar - lucene-core-8.4.1.jar @@ -391,9 +396,9 @@ The Apache Software License, Version 2.0 - presto-spi-332.jar - presto-record-decoder-332.jar * RocksDB JNI - - rocksdbjni-6.10.2.jar + - rocksdbjni-6.16.4.jar * SnakeYAML - - snakeyaml-1.27.jar + - snakeyaml-1.31.jar * Bean Validation API - validation-api-2.0.1.Final.jar * Objectsize @@ -414,20 +419,20 @@ The Apache Software License, Version 2.0 - async-http-client-2.12.1.jar - async-http-client-netty-utils-2.12.1.jar * Apache Bookkeeper - - bookkeeper-common-4.14.2.jar - - bookkeeper-common-allocator-4.14.2.jar - - bookkeeper-proto-4.14.2.jar - - bookkeeper-server-4.14.2.jar - - 
bookkeeper-stats-api-4.14.2.jar - - bookkeeper-tools-framework-4.14.2.jar - - circe-checksum-4.14.2.jar - - codahale-metrics-provider-4.14.2.jar - - cpu-affinity-4.14.2.jar - - http-server-4.14.2.jar - - prometheus-metrics-provider-4.14.2.jar - - codahale-metrics-provider-4.14.2.jar + - bookkeeper-common-4.14.5.jar + - bookkeeper-common-allocator-4.14.5.jar + - bookkeeper-proto-4.14.5.jar + - bookkeeper-server-4.14.5.jar + - bookkeeper-stats-api-4.14.5.jar + - bookkeeper-tools-framework-4.14.5.jar + - circe-checksum-4.14.5.jar + - codahale-metrics-provider-4.14.5.jar + - cpu-affinity-4.14.5.jar + - http-server-4.14.5.jar + - prometheus-metrics-provider-4.14.5.jar + - codahale-metrics-provider-4.14.5.jar * Apache Commons - - commons-cli-1.2.jar + - commons-cli-1.5.0.jar - commons-codec-1.15.jar - commons-collections4-4.1.jar - commons-configuration-1.10.jar @@ -435,11 +440,11 @@ The Apache Software License, Version 2.0 - commons-lang-2.6.jar - commons-logging-1.2.jar * GSON - - gson-2.8.6.jar + - gson-2.8.9.jar * Snappy - snappy-java-1.1.7.jar * Jackson - - jackson-module-parameter-names-2.12.3.jar + - jackson-module-parameter-names-2.13.4.jar * Java Assist - javassist-3.25.0-GA.jar * Java Native Access @@ -459,7 +464,7 @@ The Apache Software License, Version 2.0 Protocol Buffers License * Protocol Buffers - - protobuf-java-3.11.4.jar + - protobuf-java-3.19.2.jar BSD 3-clause "New" or "Revised" License * RE2J TD -- re2j-td-1.4.jar @@ -482,13 +487,12 @@ MIT License * PCollections - pcollections-2.1.2.jar * SLF4J - - slf4j-jdk14-1.7.29.jar - - slf4j-api-1.7.25.jar - - slf4j-jdk14-1.7.30.jar + - slf4j-api-1.7.32.jar + - slf4j-jdk14-1.7.32.jar * JCL 1.2 Implemented Over SLF4J - - jcl-over-slf4j-1.7.25.jar + - jcl-over-slf4j-1.7.32.jar * JUL to SLF4J Bridge - - jul-to-slf4j-1.7.25.jar + - jul-to-slf4j-1.7.32.jar * Checker Qual - checker-qual-3.5.0.jar diff --git a/pulsar-sql/presto-distribution/pom.xml b/pulsar-sql/presto-distribution/pom.xml index 
a0ded27662cc5..b1b3d4281d409 100644 --- a/pulsar-sql/presto-distribution/pom.xml +++ b/pulsar-sql/presto-distribution/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar-sql - 2.9.0-SNAPSHOT + 2.9.3 pulsar-presto-distribution @@ -38,11 +38,10 @@ 0.170 2.6 0.0.12 - 4.2.0 - 2.12.3 + 2.13.2 - 2.12.3 + 2.13.2.1 3.0.5 30.1-jre 2.12.1 @@ -100,6 +99,10 @@ javax.activation activation + + com.google.inject.extensions + guice-multibindings + @@ -137,67 +140,58 @@ ${objectsize.version} - - - com.google.inject.extensions - guice-multibindings - ${guice.version} - - com.fasterxml.jackson.core jackson-core - ${jackson.version} com.fasterxml.jackson.core jackson-databind - ${jackson.databind.version} com.fasterxml.jackson.core jackson-annotations - ${jackson.version} com.fasterxml.jackson.datatype jackson-datatype-joda - ${jackson.version} com.fasterxml.jackson.dataformat jackson-dataformat-yaml - ${jackson.version} com.fasterxml.jackson.datatype jackson-datatype-guava - ${jackson.version} com.fasterxml.jackson.datatype jackson-datatype-jdk8 - ${jackson.version} com.fasterxml.jackson.datatype jackson-datatype-jsr310 - ${jackson.version} com.fasterxml.jackson.dataformat jackson-dataformat-smile - ${jackson.version} + + + + ${project.groupId} + java-version-trim-agent + ${project.version} + provided diff --git a/pulsar-sql/presto-distribution/src/assembly/assembly.xml b/pulsar-sql/presto-distribution/src/assembly/assembly.xml index 4f1bac20dcee4..bc1fe5ed46037 100644 --- a/pulsar-sql/presto-distribution/src/assembly/assembly.xml +++ b/pulsar-sql/presto-distribution/src/assembly/assembly.xml @@ -40,6 +40,11 @@ bin/ 644 + + ${basedir}/../java-version-trim-agent/target/java-version-trim-agent.jar + java-version-trim-agent.jar + / + diff --git a/pulsar-sql/presto-pulsar-plugin/pom.xml b/pulsar-sql/presto-pulsar-plugin/pom.xml index 1cad798fc96e0..a3ef46f0e8741 100644 --- a/pulsar-sql/presto-pulsar-plugin/pom.xml +++ b/pulsar-sql/presto-pulsar-plugin/pom.xml @@ -25,7 +25,7 @@ 
org.apache.pulsar pulsar-sql - 2.9.0-SNAPSHOT + 2.9.3 pulsar-presto-connector diff --git a/pulsar-sql/presto-pulsar/pom.xml b/pulsar-sql/presto-pulsar/pom.xml index d62d947e54a75..dc5abfacde221 100644 --- a/pulsar-sql/presto-pulsar/pom.xml +++ b/pulsar-sql/presto-pulsar/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar-sql - 2.9.0-SNAPSHOT + 2.9.3 pulsar-presto-connector-original @@ -107,6 +107,11 @@ ${presto.version} + + javax.annotation + javax.annotation-api + + io.prestosql presto-main diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarConnectorCache.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarConnectorCache.java index bf823de7229f9..9a64c055d07ac 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarConnectorCache.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarConnectorCache.java @@ -42,6 +42,7 @@ import org.apache.pulsar.PulsarVersion; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.policies.data.OffloadPoliciesImpl; +import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.metadata.api.MetadataStoreConfig; import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended; @@ -109,7 +110,8 @@ private ManagedLedgerFactory initManagedLedgerFactory(PulsarConnectorConfig puls .setReadEntryTimeout(60) .setThrottleValue(pulsarConnectorConfig.getBookkeeperThrottleValue()) .setNumIOThreads(pulsarConnectorConfig.getBookkeeperNumIOThreads()) - .setNumWorkerThreads(pulsarConnectorConfig.getBookkeeperNumWorkerThreads()); + .setNumWorkerThreads(pulsarConnectorConfig.getBookkeeperNumWorkerThreads()) + .setNettyMaxFrameSizeBytes(pulsarConnectorConfig.getMaxMessageSize() + Commands.MESSAGE_SIZE_FRAME_PADDING); ManagedLedgerFactoryConfig managedLedgerFactoryConfig = new ManagedLedgerFactoryConfig(); 
managedLedgerFactoryConfig.setMaxCacheSize(pulsarConnectorConfig.getManagedLedgerCacheSizeMB()); diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java index f1e2bdb4bb4bf..e70f574cf29e2 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java @@ -29,6 +29,9 @@ import io.airlift.log.Logger; import io.airlift.slice.Slice; import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.util.Recycler; +import io.netty.util.ReferenceCountUtil; import io.prestosql.decoder.DecoderColumnHandle; import io.prestosql.decoder.FieldValueProvider; import io.prestosql.spi.block.Block; @@ -42,7 +45,9 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.Entry; @@ -58,13 +63,17 @@ import org.apache.pulsar.client.impl.schema.KeyValueSchemaInfo; import org.apache.pulsar.common.api.raw.MessageParser; import org.apache.pulsar.common.api.raw.RawMessage; +import org.apache.pulsar.common.api.raw.RawMessageIdImpl; +import org.apache.pulsar.common.api.raw.RawMessageImpl; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.policies.data.OffloadPoliciesImpl; +import org.apache.pulsar.common.protocol.schema.BytesSchemaVersion; import org.apache.pulsar.common.schema.KeyValue; import org.apache.pulsar.common.schema.KeyValueEncodingType; import org.apache.pulsar.common.schema.SchemaInfo; import 
org.apache.pulsar.common.schema.SchemaType; +import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; import org.apache.pulsar.sql.presto.util.CacheSizeAllocator; import org.apache.pulsar.sql.presto.util.NoStrictCacheSizeAllocator; import org.apache.pulsar.sql.presto.util.NullCacheSizeAllocator; @@ -103,6 +112,7 @@ public class PulsarRecordCursor implements RecordCursor { private final long splitSize; private long entriesProcessed = 0; private int partition = -1; + private volatile Throwable deserializingError; private PulsarSqlSchemaInfoProvider schemaInfoProvider; @@ -111,6 +121,9 @@ public class PulsarRecordCursor implements RecordCursor { PulsarDispatchingRowDecoderFactory decoderFactory; + protected ConcurrentOpenHashMap chunkedMessagesMap = + ConcurrentOpenHashMap.newBuilder().build(); + private static final Logger log = Logger.get(PulsarRecordCursor.class); public PulsarRecordCursor(List columnHandles, PulsarSplit pulsarSplit, @@ -227,106 +240,133 @@ public void setPulsarSqlSchemaInfoProvider(PulsarSqlSchemaInfoProvider schemaInf } @VisibleForTesting - class DeserializeEntries implements Runnable { + class DeserializeEntries extends Thread { - protected boolean isRunning = false; + private final AtomicBoolean isRunning; - private final Thread thread; + private final CompletableFuture closeHandle; public DeserializeEntries() { - this.thread = new Thread(this, "derserialize-thread-split-" + pulsarSplit.getSplitId()); + super("deserialize-thread-split-" + pulsarSplit.getSplitId()); + this.isRunning = new AtomicBoolean(false); + this.closeHandle = new CompletableFuture<>(); } - public void interrupt() { - isRunning = false; - thread.interrupt(); + @Override + public void start() { + if (isRunning.compareAndSet(false, true)) { + super.start(); + } } - public void start() { - this.thread.start(); + public CompletableFuture close() { + if (isRunning.compareAndSet(true, false)) { + super.interrupt(); + } + return closeHandle; } @Override public void 
run() { - isRunning = true; - while (isRunning) { - - int read = entryQueue.drain(new MessagePassingQueue.Consumer() { - @Override - public void accept(Entry entry) { - - try { - entryQueueCacheSizeAllocator.release(entry.getLength()); - - long bytes = entry.getDataBuffer().readableBytes(); - completedBytes += bytes; - // register stats for bytes read - metricsTracker.register_BYTES_READ(bytes); - - // check if we have processed all entries in this split - if (((PositionImpl) entry.getPosition()).compareTo(pulsarSplit.getEndPosition()) >= 0) { - return; - } - - // set start time for time deserializing entries for stats - metricsTracker.start_ENTRY_DESERIALIZE_TIME(); + try { + while (isRunning.get()) { + int read = entryQueue.drain(new MessagePassingQueue.Consumer() { + @Override + public void accept(Entry entry) { try { - MessageParser.parseMessage(topicName, entry.getLedgerId(), entry.getEntryId(), - entry.getDataBuffer(), (message) -> { - try { - // start time for message queue read - metricsTracker.start_MESSAGE_QUEUE_ENQUEUE_WAIT_TIME(); - - while (true) { - if (!haveAvailableCacheSize( - messageQueueCacheSizeAllocator, messageQueue) - || !messageQueue.offer(message)) { - Thread.sleep(1); - } else { - messageQueueCacheSizeAllocator.allocate( - message.getData().readableBytes()); - break; + entryQueueCacheSizeAllocator.release(entry.getLength()); + + long bytes = entry.getDataBuffer().readableBytes(); + completedBytes += bytes; + // register stats for bytes read + metricsTracker.register_BYTES_READ(bytes); + + // check if we have processed all entries in this split + // and no incomplete chunked messages exist + if (entryExceedSplitEndPosition(entry) && chunkedMessagesMap.isEmpty()) { + return; + } + + // set start time for time deserializing entries for stats + metricsTracker.start_ENTRY_DESERIALIZE_TIME(); + + try { + MessageParser.parseMessage(topicName, entry.getLedgerId(), entry.getEntryId(), + entry.getDataBuffer(), (message) -> { + try { + // start time 
for message queue read + metricsTracker.start_MESSAGE_QUEUE_ENQUEUE_WAIT_TIME(); + + if (message.getNumChunksFromMsg() > 1) { + message = processChunkedMessages(message); + } else if (entryExceedSplitEndPosition(entry)) { + // skip no chunk or no multi chunk message + // that exceed split end position + message.release(); + message = null; + } + if (message != null) { + while (true) { + if (!haveAvailableCacheSize( + messageQueueCacheSizeAllocator, messageQueue) + || !messageQueue.offer(message)) { + Thread.sleep(1); + } else { + messageQueueCacheSizeAllocator.allocate( + message.getData().readableBytes()); + break; + } + } } - } - - // stats for how long a read from message queue took - metricsTracker.end_MESSAGE_QUEUE_ENQUEUE_WAIT_TIME(); - // stats for number of messages read - metricsTracker.incr_NUM_MESSAGES_DESERIALIZED_PER_ENTRY(); - - } catch (InterruptedException e) { - //no-op - } - }, pulsarConnectorConfig.getMaxMessageSize()); - } catch (IOException e) { - log.error(e, "Failed to parse message from pulsar topic %s", topicName.toString()); - throw new RuntimeException(e); - } - // stats for time spend deserializing entries - metricsTracker.end_ENTRY_DESERIALIZE_TIME(); - // stats for num messages per entry - metricsTracker.end_NUM_MESSAGES_DESERIALIZED_PER_ENTRY(); + // stats for how long a read from message queue took + metricsTracker.end_MESSAGE_QUEUE_ENQUEUE_WAIT_TIME(); + // stats for number of messages read + metricsTracker.incr_NUM_MESSAGES_DESERIALIZED_PER_ENTRY(); - } finally { - entriesProcessed++; - entry.release(); + } catch (InterruptedException e) { + //no-op + } + }, pulsarConnectorConfig.getMaxMessageSize()); + } catch (IOException e) { + log.error(e, "Failed to parse message from pulsar topic %s", topicName.toString()); + throw new RuntimeException(e); + } + // stats for time spend deserializing entries + metricsTracker.end_ENTRY_DESERIALIZE_TIME(); + + // stats for num messages per entry + 
metricsTracker.end_NUM_MESSAGES_DESERIALIZED_PER_ENTRY(); + + } finally { + entriesProcessed++; + entry.release(); + } } - } - }); + }); - if (read <= 0) { - try { - Thread.sleep(1); - } catch (InterruptedException e) { - return; + if (read <= 0) { + try { + Thread.sleep(1); + } catch (InterruptedException e) { + return; + } } } + closeHandle.complete(null); + } catch (Throwable ex) { + log.error(ex, "Stop running DeserializeEntries"); + closeHandle.completeExceptionally(ex); + throw ex; } } } + private boolean entryExceedSplitEndPosition(Entry entry) { + return ((PositionImpl) entry.getPosition()).compareTo(pulsarSplit.getEndPosition()) >= 0; + } + @VisibleForTesting class ReadEntries implements AsyncCallbacks.ReadEntriesCallback { @@ -340,8 +380,9 @@ class ReadEntries implements AsyncCallbacks.ReadEntriesCallback { public void run() { if (outstandingReadsRequests.get() > 0) { - if (!cursor.hasMoreEntries() || ((PositionImpl) cursor.getReadPosition()) - .compareTo(pulsarSplit.getEndPosition()) >= 0) { + if (!cursor.hasMoreEntries() || + (((PositionImpl) cursor.getReadPosition()).compareTo(pulsarSplit.getEndPosition()) >= 0 + && chunkedMessagesMap.isEmpty())) { isDone = true; } else { @@ -407,12 +448,14 @@ public Entry get() { public boolean hasFinished() { return messageQueue.isEmpty() && isDone && outstandingReadsRequests.get() >= 1 - && splitSize <= entriesProcessed; + && splitSize <= entriesProcessed && chunkedMessagesMap.isEmpty(); } @Override public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { - log.debug(exception, "Failed to read entries from topic %s", topicName.toString()); + if (log.isDebugEnabled()) { + log.debug(exception, "Failed to read entries from topic %s", topicName.toString()); + } outstandingReadsRequests.incrementAndGet(); //set read latency stats for failed @@ -441,6 +484,9 @@ public boolean advanceNextPosition() { if (readEntries == null) { // start deserialize thread deserializeEntries = new DeserializeEntries(); + 
deserializeEntries.setUncaughtExceptionHandler((t, ex) -> { + deserializingError = ex; + }); deserializeEntries.start(); readEntries = new ReadEntries(); @@ -465,6 +511,8 @@ public boolean advanceNextPosition() { if (currentMessage != null) { messageQueueCacheSizeAllocator.release(currentMessage.getData().readableBytes()); break; + } else if (deserializingError != null) { + throw new RuntimeException(deserializingError); } else { try { Thread.sleep(1); @@ -476,17 +524,10 @@ public boolean advanceNextPosition() { } } - //start time for deseralizing record + //start time for deserializing record metricsTracker.start_RECORD_DESERIALIZE_TIME(); - SchemaInfo schemaInfo = getBytesSchemaInfo(pulsarSplit.getSchemaType(), pulsarSplit.getSchemaName()); - try { - if (schemaInfo == null) { - schemaInfo = schemaInfoProvider.getSchemaByVersion(this.currentMessage.getSchemaVersion()).get(); - } - } catch (InterruptedException | ExecutionException e) { - throw new RuntimeException(e); - } + SchemaInfo schemaInfo = getSchemaInfo(pulsarSplit); Map currentRowValuesMap = new HashMap<>(); @@ -560,7 +601,7 @@ public boolean advanceNextPosition() { currentRowValuesMap.put(columnHandle, longValueProvider(this.partition)); } else if (PulsarInternalColumn.EVENT_TIME.getName().equals(columnHandle.getName())) { currentRowValuesMap.put(columnHandle, PulsarFieldValueProviders.timeValueProvider( - this.currentMessage.getEventTime(), this.currentMessage.getPublishTime() == 0)); + this.currentMessage.getEventTime(), this.currentMessage.getEventTime() == 0)); } else if (PulsarInternalColumn.PUBLISH_TIME.getName().equals(columnHandle.getName())) { currentRowValuesMap.put(columnHandle, PulsarFieldValueProviders.timeValueProvider( this.currentMessage.getPublishTime(), this.currentMessage.getPublishTime() == 0)); @@ -600,6 +641,38 @@ public boolean advanceNextPosition() { return true; } + /** + * Get the schemaInfo of the message. + * + * 1. 
If the schema type of pulsarSplit is NONE or BYTES, use the BYTES schema. + * 2. If the schema type of pulsarSplit is BYTEBUFFER, use the BYTEBUFFER schema. + * 3. If the schema version of the message is null, use the schema info of pulsarSplit. + * 4. If the schema version of the message is not null, get the specific version schema by PulsarAdmin. + * 5. If the final schema is null throw a runtime exception. + */ + private SchemaInfo getSchemaInfo(PulsarSplit pulsarSplit) { + SchemaInfo schemaInfo = getBytesSchemaInfo(pulsarSplit.getSchemaType(), pulsarSplit.getSchemaName()); + if (schemaInfo != null) { + return schemaInfo; + } + try { + if (this.currentMessage.getSchemaVersion() == null) { + schemaInfo = pulsarSplit.getSchemaInfo(); + } else { + schemaInfo = schemaInfoProvider.getSchemaByVersion(this.currentMessage.getSchemaVersion()).get(); + } + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + if (schemaInfo == null) { + String schemaVersion = this.currentMessage.getSchemaVersion() == null + ? 
"null" : BytesSchemaVersion.of(this.currentMessage.getSchemaVersion()).toString(); + throw new RuntimeException("The specific version (" + schemaVersion + ") schema of the table " + + pulsarSplit.getTableName() + " is null"); + } + return schemaInfo; + } + private SchemaInfo getBytesSchemaInfo(SchemaType schemaType, String schemaName) { if (!schemaType.equals(SchemaType.BYTES) && !schemaType.equals(SchemaType.NONE)) { return null; @@ -654,20 +727,18 @@ public boolean isNull(int field) { public void close() { log.info("Closing cursor record"); - if (currentMessage != null) { - currentMessage.release(); - } - - if (messageQueue != null) { - messageQueue.drain(RawMessage::release); - } - - if (entryQueue != null) { - entryQueue.drain(Entry::release); - } - if (deserializeEntries != null) { - deserializeEntries.interrupt(); + deserializeEntries.close().whenComplete((r, t) -> { + if (entryQueue != null) { + entryQueue.drain(Entry::release); + } + if (messageQueue != null) { + messageQueue.drain(RawMessage::release); + } + if (currentMessage != null) { + currentMessage.release(); + } + }); } if (this.cursor != null) { @@ -706,4 +777,95 @@ private void initEntryCacheSizeAllocator(PulsarConnectorConfig connectorConfig) } } + private RawMessage processChunkedMessages(RawMessage message) { + final String uuid = message.getUUID(); + final int chunkId = message.getChunkId(); + final int totalChunkMsgSize = message.getTotalChunkMsgSize(); + final int numChunks = message.getNumChunksFromMsg(); + + RawMessageIdImpl rawMessageId = (RawMessageIdImpl) message.getMessageId(); + if (rawMessageId.getLedgerId() > pulsarSplit.getEndPositionLedgerId() + && !chunkedMessagesMap.containsKey(uuid)) { + // If the message is out of the split range, we only care about the incomplete chunked messages. 
+ message.release(); + return null; + } + if (chunkId == 0) { + ByteBuf chunkedMsgBuffer = Unpooled.directBuffer(totalChunkMsgSize, totalChunkMsgSize); + chunkedMessagesMap.computeIfAbsent(uuid, (key) -> ChunkedMessageCtx.get(numChunks, chunkedMsgBuffer)); + } + + ChunkedMessageCtx chunkedMsgCtx = chunkedMessagesMap.get(uuid); + if (chunkedMsgCtx == null || chunkedMsgCtx.chunkedMsgBuffer == null + || chunkId != (chunkedMsgCtx.lastChunkedMessageId + 1) || chunkId >= numChunks) { + // Means we lost the first chunk, it will happen when the beginning chunk didn't belong to this split. + log.info("Received unexpected chunk. messageId: %s, last-chunk-id: %s chunkId: %s, totalChunks: %s", + message.getMessageId(), + (chunkedMsgCtx != null ? chunkedMsgCtx.lastChunkedMessageId : null), chunkId, + numChunks); + if (chunkedMsgCtx != null) { + if (chunkedMsgCtx.chunkedMsgBuffer != null) { + ReferenceCountUtil.safeRelease(chunkedMsgCtx.chunkedMsgBuffer); + } + chunkedMsgCtx.recycle(); + } + chunkedMessagesMap.remove(uuid); + message.release(); + return null; + } + + // append the chunked payload and update lastChunkedMessage-id + chunkedMsgCtx.chunkedMsgBuffer.writeBytes(message.getData()); + chunkedMsgCtx.lastChunkedMessageId = chunkId; + + // if final chunk is not received yet then release payload and return + if (chunkId != (numChunks - 1)) { + message.release(); + return null; + } + + if (log.isDebugEnabled()) { + log.debug("Chunked message completed. chunkId: %s, totalChunks: %s, msgId: %s, sequenceId: %s", + chunkId, numChunks, rawMessageId, message.getSequenceId()); + } + chunkedMessagesMap.remove(uuid); + ByteBuf unCompressedPayload = chunkedMsgCtx.chunkedMsgBuffer; + chunkedMsgCtx.recycle(); + // The chunked message complete, we use the entire payload to instead of the last chunk payload. 
+ return ((RawMessageImpl) message).updatePayloadForChunkedMessage(unCompressedPayload); + } + + static class ChunkedMessageCtx { + + protected int totalChunks = -1; + protected ByteBuf chunkedMsgBuffer; + protected int lastChunkedMessageId = -1; + + static ChunkedMessageCtx get(int numChunksFromMsg, ByteBuf chunkedMsgBuffer) { + ChunkedMessageCtx ctx = RECYCLER.get(); + ctx.totalChunks = numChunksFromMsg; + ctx.chunkedMsgBuffer = chunkedMsgBuffer; + return ctx; + } + + private final Recycler.Handle recyclerHandle; + + private ChunkedMessageCtx(Recycler.Handle recyclerHandle) { + this.recyclerHandle = recyclerHandle; + } + + private static final Recycler RECYCLER = new Recycler() { + protected ChunkedMessageCtx newObject(Recycler.Handle handle) { + return new ChunkedMessageCtx(handle); + } + }; + + public void recycle() { + this.totalChunks = -1; + this.chunkedMsgBuffer = null; + this.lastChunkedMessageId = -1; + recyclerHandle.recycle(this); + } + } + } diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSplit.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSplit.java index 645edbd84f2d0..03a6b771bbaa1 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSplit.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSplit.java @@ -32,7 +32,6 @@ import java.util.List; import java.util.Map; import org.apache.bookkeeper.mledger.impl.PositionImpl; -import org.apache.pulsar.client.impl.schema.SchemaInfoImpl; import org.apache.pulsar.common.policies.data.OffloadPoliciesImpl; import org.apache.pulsar.common.schema.SchemaInfo; import org.apache.pulsar.common.schema.SchemaType; @@ -102,7 +101,7 @@ public PulsarSplit( this.offloadPolicies = offloadPolicies; ObjectMapper objectMapper = new ObjectMapper(); - this.schemaInfo = SchemaInfoImpl.builder() + this.schemaInfo = SchemaInfo.builder() .name(originSchemaName) .type(schemaType) 
.schema(schema.getBytes("ISO8859-1")) diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSqlSchemaInfoProvider.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSqlSchemaInfoProvider.java index 3a9233c3d872b..828ceef3d44f2 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSqlSchemaInfoProvider.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarSqlSchemaInfoProvider.java @@ -102,8 +102,13 @@ private SchemaInfo loadSchema(BytesSchemaVersion bytesSchemaVersion) throws Puls ClassLoader originalContextLoader = Thread.currentThread().getContextClassLoader(); try { Thread.currentThread().setContextClassLoader(InjectionManagerFactory.class.getClassLoader()); - return pulsarAdmin.schemas() - .getSchemaInfo(topicName.toString(), ByteBuffer.wrap(bytesSchemaVersion.get()).getLong()); + long version = ByteBuffer.wrap(bytesSchemaVersion.get()).getLong(); + SchemaInfo schemaInfo = pulsarAdmin.schemas().getSchemaInfo(topicName.toString(), version); + if (schemaInfo == null) { + throw new RuntimeException( + "The specific version (" + version + ") schema of the topic " + topicName + " is null"); + } + return schemaInfo; } finally { Thread.currentThread().setContextClassLoader(originalContextLoader); } diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java index 3b7e293557923..fc122fc1fa4ef 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java @@ -41,6 +41,8 @@ import io.prestosql.spi.type.BigintType; import io.prestosql.spi.type.BooleanType; import io.prestosql.spi.type.DateType; +import 
io.prestosql.spi.type.DecimalType; +import io.prestosql.spi.type.Decimals; import io.prestosql.spi.type.DoubleType; import io.prestosql.spi.type.IntegerType; import io.prestosql.spi.type.MapType; @@ -54,6 +56,7 @@ import io.prestosql.spi.type.Type; import io.prestosql.spi.type.VarbinaryType; import io.prestosql.spi.type.VarcharType; +import java.math.BigInteger; import java.nio.ByteBuffer; import java.util.List; @@ -142,7 +145,7 @@ private boolean isSupportedType(Type type) { } private boolean isSupportedPrimitive(Type type) { - return type instanceof VarcharType || SUPPORTED_PRIMITIVE_TYPES.contains(type); + return type instanceof VarcharType || type instanceof DecimalType || SUPPORTED_PRIMITIVE_TYPES.contains(type); } public FieldValueProvider decodeField(GenericRecord avroRecord) { @@ -208,6 +211,13 @@ public long getLong() { return floatToIntBits((Float) value); } + if (columnType instanceof DecimalType) { + ByteBuffer buffer = (ByteBuffer) value; + byte[] bytes = new byte[buffer.remaining()]; + buffer.get(bytes); + return new BigInteger(bytes).longValue(); + } + throw new PrestoException(DECODER_CONVERSION_NOT_SUPPORTED, format("cannot decode object of '%s' as '%s' for column '%s'", value.getClass(), columnType, columnName)); @@ -237,6 +247,13 @@ private static Slice getSlice(Object value, Type type, String columnName) { } } + // The returned Slice size must be equals to 18 Byte + if (type instanceof DecimalType) { + ByteBuffer buffer = (ByteBuffer) value; + BigInteger bigInteger = new BigInteger(buffer.array()); + return Decimals.encodeUnscaledValue(bigInteger); + } + throw new PrestoException(DECODER_CONVERSION_NOT_SUPPORTED, format("cannot decode object of '%s' as '%s' for column '%s'", value.getClass(), type, columnName)); diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java 
b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java index 26c333ab56b82..7f9169e527364 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java @@ -34,6 +34,7 @@ import io.prestosql.spi.type.ArrayType; import io.prestosql.spi.type.BigintType; import io.prestosql.spi.type.BooleanType; +import io.prestosql.spi.type.DecimalType; import io.prestosql.spi.type.DoubleType; import io.prestosql.spi.type.IntegerType; import io.prestosql.spi.type.RealType; @@ -130,7 +131,14 @@ private Type parseAvroPrestoType(String fieldname, Schema schema) { + "please check the schema or report the bug.", fieldname)); case FIXED: case BYTES: - //TODO: support decimal logicalType + // When the precision <= 0, throw Exception. + // When the precision > 0 and <= 18, use ShortDecimalType. and mapping Long + // When the precision > 18 and <= 36, use LongDecimalType. and mapping Slice + // When the precision > 36, throw Exception. 
+ if (logicalType instanceof LogicalTypes.Decimal) { + LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType; + return DecimalType.createDecimalType(decimal.getPrecision(), decimal.getScale()); + } return VarbinaryType.VARBINARY; case INT: if (logicalType == LogicalTypes.timeMillis()) { diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonFieldDecoder.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonFieldDecoder.java index 960d8f42bdf7a..a14cd6b3ba2f2 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonFieldDecoder.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonFieldDecoder.java @@ -58,6 +58,8 @@ import io.prestosql.spi.type.BigintType; import io.prestosql.spi.type.BooleanType; import io.prestosql.spi.type.DateType; +import io.prestosql.spi.type.DecimalType; +import io.prestosql.spi.type.Decimals; import io.prestosql.spi.type.DoubleType; import io.prestosql.spi.type.IntegerType; import io.prestosql.spi.type.MapType; @@ -70,6 +72,7 @@ import io.prestosql.spi.type.Type; import io.prestosql.spi.type.VarbinaryType; import io.prestosql.spi.type.VarcharType; +import java.math.BigInteger; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -120,6 +123,9 @@ private static Pair getNumRangeByType(Type type) { } private boolean isSupportedType(Type type) { + if (type instanceof DecimalType) { + return true; + } if (isVarcharType(type)) { return true; } @@ -228,6 +234,13 @@ public static long getLong(JsonNode value, Type type, String columnName, long mi return floatToIntBits((Float) parseFloat(value.asText())); } + // If it is decimalType, need to eliminate the decimal point, + // and give it to presto to set the decimal point + if (type instanceof DecimalType) { + String decimalLong = value.asText().replace(".", ""); + return 
Long.valueOf(decimalLong); + } + long longValue; if (value.isIntegralNumber() && !value.isBigInteger()) { longValue = value.longValue(); @@ -267,6 +280,15 @@ public static double getDouble(JsonNode value, Type type, String columnName) { private static Slice getSlice(JsonNode value, Type type, String columnName) { String textValue = value.isValueNode() ? value.asText() : value.toString(); + + // If it is decimalType, need to eliminate the decimal point, + // and give it to presto to set the decimal point + if (type instanceof DecimalType) { + textValue = textValue.replace(".", ""); + BigInteger bigInteger = new BigInteger(textValue); + return Decimals.encodeUnscaledValue(bigInteger); + } + Slice slice = utf8Slice(textValue); if (isVarcharType(type)) { slice = truncateToLength(slice, type); diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java index 10a500ba3618e..b94a963cffd1a 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java @@ -34,6 +34,7 @@ import io.prestosql.spi.type.ArrayType; import io.prestosql.spi.type.BigintType; import io.prestosql.spi.type.BooleanType; +import io.prestosql.spi.type.DecimalType; import io.prestosql.spi.type.DoubleType; import io.prestosql.spi.type.IntegerType; import io.prestosql.spi.type.RealType; @@ -128,6 +129,14 @@ private Type parseJsonPrestoType(String fieldname, Schema schema) { + "please check the schema or report the bug.", fieldname)); case FIXED: case BYTES: + // When the precision <= 0, throw Exception. + // When the precision > 0 and <= 18, use ShortDecimalType. and mapping Long + // When the precision > 18 and <= 36, use LongDecimalType. 
and mapping Slice + // When the precision > 36, throw Exception. + if (logicalType instanceof LogicalTypes.Decimal) { + LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType; + return DecimalType.createDecimalType(decimal.getPrecision(), decimal.getScale()); + } return VarbinaryType.VARBINARY; case INT: if (logicalType == LogicalTypes.timeMillis()) { diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/primitive/PulsarPrimitiveRowDecoder.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/primitive/PulsarPrimitiveRowDecoder.java index 5eb2d7f6db613..bc192c81eb012 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/primitive/PulsarPrimitiveRowDecoder.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/primitive/PulsarPrimitiveRowDecoder.java @@ -79,12 +79,12 @@ public Optional> decodeRow(ByteBuf primitiveColumn.put(columnHandle, booleanValueProvider(Boolean.valueOf((Boolean) value))); } else if (type instanceof TinyintType || type instanceof SmallintType || type instanceof IntegerType || type instanceof BigintType) { - primitiveColumn.put(columnHandle, longValueProvider(Long.valueOf(value.toString()))); + primitiveColumn.put(columnHandle, longValueProvider(Long.parseLong(value.toString()))); } else if (type instanceof DoubleType) { - primitiveColumn.put(columnHandle, doubleValueProvider(Double.valueOf(value.toString()))); + primitiveColumn.put(columnHandle, doubleValueProvider(Double.parseDouble(value.toString()))); } else if (type instanceof RealType) { primitiveColumn.put(columnHandle, longValueProvider( - Float.floatToIntBits((Float.valueOf(value.toString()))))); + Float.floatToIntBits((Float.parseFloat(value.toString()))))); } else if (type instanceof VarbinaryType) { primitiveColumn.put(columnHandle, bytesValueProvider((byte[]) value)); } else if (type instanceof VarcharType) { diff --git 
a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/protobufnative/PulsarProtobufNativeColumnDecoder.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/protobufnative/PulsarProtobufNativeColumnDecoder.java index 56e9b77cf679e..c71e7caaae876 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/protobufnative/PulsarProtobufNativeColumnDecoder.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/protobufnative/PulsarProtobufNativeColumnDecoder.java @@ -70,7 +70,8 @@ public class PulsarProtobufNativeColumnDecoder { BigintType.BIGINT, RealType.REAL, DoubleType.DOUBLE, - VarbinaryType.VARBINARY); + VarbinaryType.VARBINARY, + TimestampType.TIMESTAMP); private final Type columnType; private final String columnMapping; @@ -193,6 +194,15 @@ public long getLong() { return floatToIntBits((Float) value); } + //return millisecond which parsed from protobuf/timestamp + if (columnType instanceof TimestampType && value instanceof DynamicMessage) { + DynamicMessage message = (DynamicMessage) value; + int nanos = (int) message.getField(message.getDescriptorForType().findFieldByName("nanos")); + long seconds = (long) message.getField(message.getDescriptorForType().findFieldByName("seconds")); + //maybe an exception here, but seems will never happen in hundred years. 
+ return seconds * MILLIS_PER_SECOND + nanos / NANOS_PER_MILLISECOND; + } + throw new PrestoException(DECODER_CONVERSION_NOT_SUPPORTED, format("cannot decode object of '%s' as '%s' for column '%s'", value.getClass(), columnType, columnName)); @@ -377,5 +387,6 @@ private static Block serializeRow(BlockBuilder parentBlockBuilder, Object value, protected static final String PROTOBUF_MAP_KEY_NAME = "key"; protected static final String PROTOBUF_MAP_VALUE_NAME = "value"; - + private static final long MILLIS_PER_SECOND = 1000; + private static final long NANOS_PER_MILLISECOND = 1000000; } diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/protobufnative/PulsarProtobufNativeRowDecoderFactory.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/protobufnative/PulsarProtobufNativeRowDecoderFactory.java index 3a415b334db75..2d0f9af72dd32 100644 --- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/protobufnative/PulsarProtobufNativeRowDecoderFactory.java +++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/protobufnative/PulsarProtobufNativeRowDecoderFactory.java @@ -25,6 +25,7 @@ import com.google.common.collect.ImmutableList; import com.google.protobuf.Descriptors; +import com.google.protobuf.TimestampProto; import io.airlift.log.Logger; import io.prestosql.decoder.DecoderColumnHandle; import io.prestosql.spi.PrestoException; @@ -37,11 +38,13 @@ import io.prestosql.spi.type.RealType; import io.prestosql.spi.type.RowType; import io.prestosql.spi.type.StandardTypes; +import io.prestosql.spi.type.TimestampType; import io.prestosql.spi.type.Type; import io.prestosql.spi.type.TypeManager; import io.prestosql.spi.type.TypeSignature; import io.prestosql.spi.type.TypeSignatureParameter; import io.prestosql.spi.type.VarbinaryType; + import java.util.List; import java.util.Optional; import java.util.Set; @@ -142,11 +145,16 @@ private Type 
parseProtobufPrestoType(Descriptors.FieldDescriptor field) { ImmutableList.of(TypeSignatureParameter.typeParameter(keyType), TypeSignatureParameter.typeParameter(valueType))); } else { - //row - dataType = RowType.from(msg.getFields().stream() - .map(rowField -> new RowType.Field(Optional.of(rowField.getName()), - parseProtobufPrestoType(rowField))) - .collect(toImmutableList())); + if (TimestampProto.getDescriptor().toProto().getName().equals(msg.getFile().toProto().getName())) { + //if msg type is protobuf/timestamp + dataType = TimestampType.TIMESTAMP; + } else { + //row + dataType = RowType.from(msg.getFields().stream() + .map(rowField -> new RowType.Field(Optional.of(rowField.getName()), + parseProtobufPrestoType(rowField))) + .collect(toImmutableList())); + } } break; default: diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnector.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnector.java index 39d8ba8213cc8..7db32f591482f 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnector.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnector.java @@ -25,6 +25,7 @@ import io.prestosql.spi.connector.ConnectorContext; import io.prestosql.spi.predicate.TupleDomain; import io.prestosql.testing.TestingConnectorContext; +import java.math.BigDecimal; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.Entry; import org.apache.bookkeeper.mledger.ManagedLedgerConfig; @@ -166,6 +167,8 @@ public enum TestEnum { public int time; @org.apache.avro.reflect.AvroSchema("{ \"type\": \"int\", \"logicalType\": \"date\" }") public int date; + @org.apache.avro.reflect.AvroSchema("{ \"type\": \"bytes\", \"logicalType\": \"decimal\", \"precision\": 4, \"scale\": 2 }") + public BigDecimal decimal; public TestPulsarConnector.Bar bar; public TestEnum field7; } @@ -253,6 +256,7 @@ public 
static class Bar { fooFieldNames.add("date"); fooFieldNames.add("bar"); fooFieldNames.add("field7"); + fooFieldNames.add("decimal"); ConnectorContext prestoConnectorContext = new TestingConnectorContext(); @@ -313,6 +317,7 @@ public static class Bar { LocalDate epoch = LocalDate.ofEpochDay(0); return Math.toIntExact(ChronoUnit.DAYS.between(epoch, localDate)); }); + fooFunctions.put("decimal", integer -> BigDecimal.valueOf(1234, 2)); fooFunctions.put("bar.field1", integer -> integer % 3 == 0 ? null : integer + 1); fooFunctions.put("bar.field2", integer -> integer % 2 == 0 ? null : String.valueOf(integer + 2)); fooFunctions.put("bar.field3", integer -> integer + 3.0f); @@ -331,7 +336,6 @@ public static class Bar { * @param schemaInfo * @param handleKeyValueType * @param includeInternalColumn - * @param dispatchingRowDecoderFactory * @return */ protected static List getColumnColumnHandles(TopicName topicName, SchemaInfo schemaInfo, @@ -357,7 +361,7 @@ protected static List getColumnColumnHandles(TopicName topic public static PulsarMetadata mockColumnMetadata() { ConnectorContext prestoConnectorContext = new TestingConnectorContext(); - PulsarConnectorConfig pulsarConnectorConfig = spy(new PulsarConnectorConfig()); + PulsarConnectorConfig pulsarConnectorConfig = spy(PulsarConnectorConfig.class); pulsarConnectorConfig.setMaxEntryReadBatchSize(1); pulsarConnectorConfig.setMaxSplitEntryQueueSize(10); pulsarConnectorConfig.setMaxSplitMessageQueueSize(100); @@ -393,6 +397,7 @@ private static List getTopicEntries(String topicSchemaName) { LocalDate localDate = LocalDate.now(); LocalDate epoch = LocalDate.ofEpochDay(0); foo.date = Math.toIntExact(ChronoUnit.DAYS.between(epoch, localDate)); + foo.decimal= BigDecimal.valueOf(count, 2); MessageMetadata messageMetadata = new MessageMetadata() .setProducerName("test-producer").setSequenceId(i) @@ -446,7 +451,7 @@ protected static List getPartitionedTopics(String ns) { @BeforeMethod public void setup() throws Exception { - 
this.pulsarConnectorConfig = spy(new PulsarConnectorConfig()); + this.pulsarConnectorConfig = spy(PulsarConnectorConfig.class); this.pulsarConnectorConfig.setMaxEntryReadBatchSize(1); this.pulsarConnectorConfig.setMaxSplitEntryQueueSize(10); this.pulsarConnectorConfig.setMaxSplitMessageQueueSize(100); @@ -609,6 +614,7 @@ public void run() { foo.timestamp = (long) fooFunctions.get("timestamp").apply(count); foo.time = (int) fooFunctions.get("time").apply(count); foo.date = (int) fooFunctions.get("date").apply(count); + foo.decimal = (BigDecimal) fooFunctions.get("decimal").apply(count); foo.bar = bar; foo.field7 = (Foo.TestEnum) fooFunctions.get("field7").apply(count); diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarMetadata.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarMetadata.java index 79fb7893203c2..26199ba31debc 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarMetadata.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarMetadata.java @@ -189,7 +189,7 @@ public void testGetTableMetadataTableNoSchema(String delimiter) throws PulsarAdm @Test(dataProvider = "rewriteNamespaceDelimiter", singleThreaded = true) public void testGetTableMetadataTableBlankSchema(String delimiter) throws PulsarAdminException { updateRewriteNamespaceDelimiterIfNeeded(delimiter); - SchemaInfo badSchemaInfo = SchemaInfoImpl.builder() + SchemaInfo badSchemaInfo = SchemaInfo.builder() .schema(new byte[0]) .type(SchemaType.AVRO) .build(); @@ -216,7 +216,7 @@ public void testGetTableMetadataTableBlankSchema(String delimiter) throws Pulsar @Test(dataProvider = "rewriteNamespaceDelimiter", singleThreaded = true) public void testGetTableMetadataTableInvalidSchema(String delimiter) throws PulsarAdminException { updateRewriteNamespaceDelimiterIfNeeded(delimiter); - SchemaInfo badSchemaInfo = SchemaInfoImpl.builder() + SchemaInfo 
badSchemaInfo = SchemaInfo.builder() .schema("foo".getBytes()) .type(SchemaType.AVRO) .build(); diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarRecordCursor.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarRecordCursor.java index 8d521834c6a5c..880c2fb585bbb 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarRecordCursor.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarRecordCursor.java @@ -22,7 +22,11 @@ import io.airlift.log.Logger; import io.netty.buffer.ByteBuf; import io.prestosql.spi.predicate.TupleDomain; +import io.prestosql.spi.type.DecimalType; import io.prestosql.spi.type.RowType; +import io.prestosql.spi.type.Type; +import io.prestosql.spi.type.VarcharType; +import java.math.BigDecimal; import lombok.Data; import org.apache.bookkeeper.mledger.AsyncCallbacks; import org.apache.bookkeeper.mledger.Entry; @@ -34,18 +38,31 @@ import org.apache.bookkeeper.mledger.impl.ReadOnlyCursorImpl; import org.apache.bookkeeper.mledger.proto.MLDataFormats; import org.apache.bookkeeper.stats.NullStatsProvider; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.Schemas; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.impl.schema.KeyValueSchemaImpl; import org.apache.pulsar.common.api.proto.MessageMetadata; +import org.apache.pulsar.common.api.raw.RawMessage; +import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.protocol.Commands; +import org.apache.pulsar.common.protocol.schema.BytesSchemaVersion; import org.apache.pulsar.common.schema.KeyValue; import org.apache.pulsar.common.schema.KeyValueEncodingType; +import org.apache.pulsar.common.schema.LongSchemaVersion; +import org.apache.pulsar.common.schema.SchemaInfo; +import org.apache.pulsar.common.schema.SchemaType; 
+import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import org.testng.annotations.Test; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; import java.nio.charset.Charset; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.LinkedList; @@ -56,6 +73,8 @@ import static org.apache.pulsar.common.protocol.Commands.serializeMetadataAndPayload; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; @@ -65,6 +84,7 @@ import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; public class TestPulsarRecordCursor extends TestPulsarConnector { @@ -101,7 +121,7 @@ public void testTopics() throws Exception { assertEquals(pulsarRecordCursor.getSlice(i).getBytes(), ((String) fooFunctions.get("field2").apply(count)).getBytes()); columnsSeen.add(fooColumnHandles.get(i).getName()); } else if (fooColumnHandles.get(i).getName().equals("field3")) { - assertEquals(pulsarRecordCursor.getLong(i), Float.floatToIntBits(((Float) fooFunctions.get("field3").apply(count)).floatValue())); + assertEquals(pulsarRecordCursor.getLong(i), Float.floatToIntBits((Float) fooFunctions.get("field3").apply(count))); columnsSeen.add(fooColumnHandles.get(i).getName()); } else if (fooColumnHandles.get(i).getName().equals("field4")) { assertEquals(pulsarRecordCursor.getDouble(i), ((Double) fooFunctions.get("field4").apply(count)).doubleValue()); @@ -127,6 +147,17 @@ public void testTopics() throws Exception { }else if 
(fooColumnHandles.get(i).getName().equals("field7")) { assertEquals(pulsarRecordCursor.getSlice(i).getBytes(), fooFunctions.get("field7").apply(count).toString().getBytes()); columnsSeen.add(fooColumnHandles.get(i).getName()); + }else if (fooColumnHandles.get(i).getName().equals("decimal")) { + Type type = fooColumnHandles.get(i).getType(); + // In JsonDecoder, decimal trans to varcharType + if (type instanceof VarcharType) { + assertEquals(new String(pulsarRecordCursor.getSlice(i).getBytes()), + fooFunctions.get("decimal").apply(count).toString()); + } else { + DecimalType decimalType = (DecimalType) fooColumnHandles.get(i).getType(); + assertEquals(BigDecimal.valueOf(pulsarRecordCursor.getLong(i), decimalType.getScale()), fooFunctions.get("decimal").apply(count)); + } + columnsSeen.add(fooColumnHandles.get(i).getName()); } else { if (PulsarInternalColumn.getInternalFieldsMap().containsKey(fooColumnHandles.get(i).getName())) { columnsSeen.add(fooColumnHandles.get(i).getName()); @@ -323,9 +354,14 @@ public void run() { MessageMetadata messageMetadata = new MessageMetadata() - .setProducerName("test-producer").setSequenceId(positions.get(topic)) + .setProducerName("test-producer") + .setSequenceId(positions.get(topic)) .setPublishTime(System.currentTimeMillis()); + if (i % 2 == 0) { + messageMetadata.setSchemaVersion(new LongSchemaVersion(1L).bytes()); + } + if (KeyValueEncodingType.SEPARATED.equals(schema.getKeyValueEncodingType())) { messageMetadata .setPartitionKey(new String(schema @@ -380,7 +416,7 @@ public Long answer(InvocationOnMock invocationOnMock) throws Throwable { PulsarSplit split = new PulsarSplit(0, pulsarConnectorId.toString(), topicName.getNamespace(), topicName.getLocalName(), topicName.getLocalName(), entriesNum, - new String(schema.getSchemaInfo().getSchema()), + new String(schema.getSchemaInfo().getSchema(), "ISO8859-1"), schema.getSchemaInfo().getType(), 0, entriesNum, 0, 0, TupleDomain.all(), @@ -416,4 +452,86 @@ static class Boo { private 
Double field3; } + @Test + public void testGetSchemaInfo() throws Exception { + String topic = "get-schema-test"; + PulsarSplit pulsarSplit = Mockito.mock(PulsarSplit.class); + Mockito.when(pulsarSplit.getTableName()).thenReturn(TopicName.get(topic).getLocalName()); + Mockito.when(pulsarSplit.getSchemaName()).thenReturn("public/default"); + PulsarAdmin pulsarAdmin = Mockito.mock(PulsarAdmin.class); + Schemas schemas = Mockito.mock(Schemas.class); + Mockito.when(pulsarAdmin.schemas()).thenReturn(schemas); + PulsarConnectorConfig connectorConfig = spy(PulsarConnectorConfig.class); + Mockito.when(connectorConfig.getPulsarAdmin()).thenReturn(pulsarAdmin); + PulsarRecordCursor pulsarRecordCursor = spy(new PulsarRecordCursor( + new ArrayList<>(), pulsarSplit, connectorConfig, Mockito.mock(ManagedLedgerFactory.class), + new ManagedLedgerConfig(), null, null)); + + Class clazz = PulsarRecordCursor.class; + Method getSchemaInfo = clazz.getDeclaredMethod("getSchemaInfo", PulsarSplit.class); + getSchemaInfo.setAccessible(true); + Field currentMessage = clazz.getDeclaredField("currentMessage"); + currentMessage.setAccessible(true); + RawMessage rawMessage = Mockito.mock(RawMessage.class); + currentMessage.set(pulsarRecordCursor, rawMessage); + + // If the schemaType of pulsarSplit is NONE or BYTES, using bytes schema + Mockito.when(pulsarSplit.getSchemaType()).thenReturn(SchemaType.NONE); + SchemaInfo schemaInfo = (SchemaInfo) getSchemaInfo.invoke(pulsarRecordCursor, pulsarSplit); + assertEquals(SchemaType.BYTES, schemaInfo.getType()); + + Mockito.when(pulsarSplit.getSchemaType()).thenReturn(SchemaType.BYTES); + schemaInfo = (SchemaInfo) getSchemaInfo.invoke(pulsarRecordCursor, pulsarSplit); + assertEquals(SchemaType.BYTES, schemaInfo.getType()); + + Mockito.when(pulsarSplit.getSchemaName()).thenReturn(Schema.BYTEBUFFER.getSchemaInfo().getName()); + schemaInfo = (SchemaInfo) getSchemaInfo.invoke(pulsarRecordCursor, pulsarSplit); + assertEquals(SchemaType.BYTES, 
schemaInfo.getType()); + + // If the schemaVersion of the message is not null, try to get the schema. + Mockito.when(pulsarSplit.getSchemaType()).thenReturn(SchemaType.AVRO); + Mockito.when(rawMessage.getSchemaVersion()).thenReturn(new LongSchemaVersion(0).bytes()); + Mockito.when(schemas.getSchemaInfo(anyString(), eq(0L))) + .thenReturn(Schema.AVRO(Foo.class).getSchemaInfo()); + schemaInfo = (SchemaInfo) getSchemaInfo.invoke(pulsarRecordCursor, pulsarSplit); + assertEquals(SchemaType.AVRO, schemaInfo.getType()); + + String schemaTopic = "persistent://public/default/" + topic; + + // If the schemaVersion of the message is null and the schema of pulsarSplit is null, throw runtime exception. + Mockito.when(pulsarSplit.getSchemaInfo()).thenReturn(null); + Mockito.when(rawMessage.getSchemaVersion()).thenReturn(null); + try { + schemaInfo = (SchemaInfo) getSchemaInfo.invoke(pulsarRecordCursor, pulsarSplit); + fail("The message schema version is null and the latest schema is null, should fail."); + } catch (InvocationTargetException e) { + assertTrue(e.getCause() instanceof RuntimeException); + assertTrue(e.getCause().getMessage().contains("schema of the table " + topic + " is null")); + } + + // If the schemaVersion of the message is null, try to get the latest schema. + Mockito.when(rawMessage.getSchemaVersion()).thenReturn(null); + Mockito.when(pulsarSplit.getSchemaInfo()).thenReturn(Schema.AVRO(Foo.class).getSchemaInfo()); + schemaInfo = (SchemaInfo) getSchemaInfo.invoke(pulsarRecordCursor, pulsarSplit); + assertEquals(Schema.AVRO(Foo.class).getSchemaInfo(), schemaInfo); + + // If the specific version schema is null, throw runtime exception. 
+ Mockito.when(rawMessage.getSchemaVersion()).thenReturn(new LongSchemaVersion(1L).bytes()); + Mockito.when(schemas.getSchemaInfo(schemaTopic, 1)).thenReturn(null); + try { + schemaInfo = (SchemaInfo) getSchemaInfo.invoke(pulsarRecordCursor, pulsarSplit); + fail("The specific version " + 1 + " schema is null, should fail."); + } catch (InvocationTargetException e) { + String schemaVersion = BytesSchemaVersion.of(new LongSchemaVersion(1L).bytes()).toString(); + assertTrue(e.getCause() instanceof RuntimeException); + assertTrue(e.getCause().getMessage().contains("schema of the topic " + schemaTopic + " is null")); + } + + // Get the specific version schema. + Mockito.when(rawMessage.getSchemaVersion()).thenReturn(new LongSchemaVersion(2L).bytes()); + Mockito.when(schemas.getSchemaInfo(schemaTopic, 2)).thenReturn(Schema.AVRO(Foo.class).getSchemaInfo()); + schemaInfo = (SchemaInfo) getSchemaInfo.invoke(pulsarRecordCursor, pulsarSplit); + assertEquals(Schema.AVRO(Foo.class).getSchemaInfo(), schemaInfo); + } + } diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestReadChunkedMessages.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestReadChunkedMessages.java new file mode 100644 index 0000000000000..0a02dc308251c --- /dev/null +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestReadChunkedMessages.java @@ -0,0 +1,214 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.sql.presto; + +import com.google.common.collect.Sets; +import io.prestosql.spi.connector.ConnectorContext; +import io.prestosql.spi.predicate.TupleDomain; +import io.prestosql.testing.TestingConnectorContext; +import java.util.Collection; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.extern.slf4j.Slf4j; +import org.apache.bookkeeper.mledger.ManagedLedgerConfig; +import org.apache.bookkeeper.stats.NullStatsProvider; +import org.apache.commons.lang3.RandomUtils; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.impl.MessageIdImpl; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.apache.pulsar.common.schema.SchemaInfo; +import org.testng.Assert; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +/** + * Test read chunked messages. 
+ */ +@Test +@Slf4j +public class TestReadChunkedMessages extends MockedPulsarServiceBaseTest { + + private final static int MAX_MESSAGE_SIZE = 1024 * 1024; + + @EqualsAndHashCode + @Data + static class Movie { + private String name; + private Long publishTime; + private byte[] binaryData; + } + + @EqualsAndHashCode + @Data + static class MovieMessage { + private Movie movie; + private String messageId; + } + + @BeforeClass + @Override + protected void setup() throws Exception { + conf.setMaxMessageSize(MAX_MESSAGE_SIZE); + conf.setManagedLedgerMaxEntriesPerLedger(5); + conf.setManagedLedgerMinLedgerRolloverTimeMinutes(0); + internalSetup(); + + admin.clusters().createCluster("test", ClusterData.builder().serviceUrl(brokerUrl.toString()).build()); + + // so that clients can test short names + admin.tenants().createTenant("public", + new TenantInfoImpl(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("test"))); + admin.namespaces().createNamespace("public/default"); + admin.namespaces().setNamespaceReplicationClusters("public/default", Sets.newHashSet("test")); + } + + @AfterClass + @Override + protected void cleanup() throws Exception { + internalCleanup(); + } + + @Test + public void queryTest() throws Exception { + String topic = "chunk-topic"; + TopicName topicName = TopicName.get(topic); + int messageCnt = 20; + Set messageSet = prepareChunkedData(topic, messageCnt); + SchemaInfo schemaInfo = Schema.AVRO(Movie.class).getSchemaInfo(); + + PulsarConnectorConfig connectorConfig = new PulsarConnectorConfig(); + connectorConfig.setWebServiceUrl(pulsar.getWebServiceAddress()); + PulsarSplitManager pulsarSplitManager = new PulsarSplitManager(new PulsarConnectorId("1"), connectorConfig); + Collection splits = pulsarSplitManager.getSplitsForTopic( + topicName.getPersistenceNamingEncoding(), + pulsar.getManagedLedgerFactory(), + new ManagedLedgerConfig(), + 3, + new PulsarTableHandle("1", topicName.getNamespace(), topic, topic), + schemaInfo, + topic, + 
TupleDomain.all(), + null); + + List columnHandleList = TestPulsarConnector.getColumnColumnHandles( + topicName, schemaInfo, PulsarColumnHandle.HandleKeyValueType.NONE, true); + ConnectorContext prestoConnectorContext = new TestingConnectorContext(); + + for (PulsarSplit split : splits) { + queryAndCheck(columnHandleList, split, connectorConfig, prestoConnectorContext, messageSet); + } + Assert.assertTrue(messageSet.isEmpty()); + } + + private Set prepareChunkedData(String topic, int messageCnt) throws PulsarClientException, InterruptedException { + pulsarClient.newConsumer(Schema.AVRO(Movie.class)) + .topic(topic) + .subscriptionName("sub") + .subscribe() + .close(); + Producer producer = pulsarClient.newProducer(Schema.AVRO(Movie.class)) + .topic(topic) + .enableBatching(false) + .enableChunking(true) + .create(); + Set messageSet = new LinkedHashSet<>(); + CountDownLatch countDownLatch = new CountDownLatch(messageCnt); + for (int i = 0; i < messageCnt; i++) { + final double dataTimes = (i % 5) * 0.5; + byte[] movieBinaryData = RandomUtils.nextBytes((int) (MAX_MESSAGE_SIZE * dataTimes)); + final int length = movieBinaryData.length; + final int index = i; + + Movie movie = new Movie(); + movie.setName("movie-" + i); + movie.setPublishTime(System.currentTimeMillis()); + movie.setBinaryData(movieBinaryData); + producer.newMessage().value(movie).sendAsync() + .whenComplete((msgId, throwable) -> { + if (throwable != null) { + log.error("Failed to produce message.", throwable); + countDownLatch.countDown(); + return; + } + MovieMessage movieMessage = new MovieMessage(); + movieMessage.setMovie(movie); + MessageIdImpl messageId = (MessageIdImpl) msgId; + movieMessage.setMessageId("(" + messageId.getLedgerId() + "," + messageId.getEntryId() + ",0)"); + messageSet.add(movieMessage); + countDownLatch.countDown(); + }); + } + countDownLatch.await(); + Assert.assertEquals(messageCnt, messageSet.size()); + producer.close(); + return messageSet; + } + + private void 
queryAndCheck(List columnHandleList, + PulsarSplit split, + PulsarConnectorConfig connectorConfig, + ConnectorContext prestoConnectorContext, + Set messageSet) { + PulsarRecordCursor pulsarRecordCursor = new PulsarRecordCursor( + columnHandleList, split, connectorConfig, pulsar.getManagedLedgerFactory(), + new ManagedLedgerConfig(), new PulsarConnectorMetricsTracker(new NullStatsProvider()), + new PulsarDispatchingRowDecoderFactory(prestoConnectorContext.getTypeManager())); + + AtomicInteger receiveMsgCnt = new AtomicInteger(messageSet.size()); + while (pulsarRecordCursor.advanceNextPosition()) { + Movie movie = new Movie(); + MovieMessage movieMessage = new MovieMessage(); + movieMessage.setMovie(movie); + for (int i = 0; i < columnHandleList.size(); i++) { + switch (columnHandleList.get(i).getName()) { + case "binaryData": + movie.setBinaryData(pulsarRecordCursor.getSlice(i).getBytes()); + break; + case "name": + movie.setName(new String(pulsarRecordCursor.getSlice(i).getBytes())); + break; + case "publishTime": + movie.setPublishTime(pulsarRecordCursor.getLong(i)); + break; + case "__message_id__": + movieMessage.setMessageId(new String(pulsarRecordCursor.getSlice(i).getBytes())); + default: + // do nothing + break; + } + } + + Assert.assertTrue(messageSet.contains(movieMessage)); + messageSet.remove(movieMessage); + receiveMsgCnt.decrementAndGet(); + } + } + +} diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/AbstractDecoderTester.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/AbstractDecoderTester.java index cd4fcaf0d1e33..e5ceb321aaec5 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/AbstractDecoderTester.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/AbstractDecoderTester.java @@ -26,6 +26,7 @@ import io.prestosql.spi.connector.ConnectorContext; import io.prestosql.spi.type.Type; import 
io.prestosql.testing.TestingConnectorContext; +import java.math.BigDecimal; import org.apache.pulsar.common.naming.NamespaceName; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.common.schema.SchemaInfo; @@ -62,7 +63,7 @@ public abstract class AbstractDecoderTester { protected void init() { ConnectorContext prestoConnectorContext = new TestingConnectorContext(); this.decoderFactory = new PulsarDispatchingRowDecoderFactory(prestoConnectorContext.getTypeManager()); - this.pulsarConnectorConfig = spy(new PulsarConnectorConfig()); + this.pulsarConnectorConfig = spy(PulsarConnectorConfig.class); this.pulsarConnectorConfig.setMaxEntryReadBatchSize(1); this.pulsarConnectorConfig.setMaxSplitEntryQueueSize(10); this.pulsarConnectorConfig.setMaxSplitMessageQueueSize(100); @@ -102,6 +103,10 @@ protected void checkValue(Map decodedRo decoderTestUtil.checkValue(decodedRow, handle, value); } + protected void checkValue(Map decodedRow, DecoderColumnHandle handle, BigDecimal value) { + decoderTestUtil.checkValue(decodedRow, handle, value); + } + protected Block getBlock(Map decodedRow, DecoderColumnHandle handle) { FieldValueProvider provider = decodedRow.get(handle); assertNotNull(provider); diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java index 115f3691c00f8..da6d92e5158ac 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.sql.presto.decoder; +import java.math.BigDecimal; import lombok.Data; import java.util.List; @@ -45,6 +46,10 @@ public static enum TestEnum { public int dateField; public TestRow rowField; public TestEnum enumField; + @org.apache.avro.reflect.AvroSchema("{ 
\"type\": \"bytes\", \"logicalType\": \"decimal\", \"precision\": 4, \"scale\": 2 }") + public BigDecimal decimalField; + @org.apache.avro.reflect.AvroSchema("{ \"type\": \"bytes\", \"logicalType\": \"decimal\", \"precision\": 30, \"scale\": 2 }") + public BigDecimal longDecimalField; public List arrayField; public Map mapField; @@ -62,7 +67,6 @@ public static class NestedRow { public long longField; } - public static class CompositeRow { public String stringField; public List arrayField; diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestUtil.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestUtil.java index 4c3c4a634472c..496a6f061bfca 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestUtil.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestUtil.java @@ -23,11 +23,16 @@ import io.prestosql.decoder.FieldValueProvider; import io.prestosql.spi.block.Block; import io.prestosql.spi.type.ArrayType; +import io.prestosql.spi.type.DecimalType; +import io.prestosql.spi.type.Decimals; import io.prestosql.spi.type.MapType; import io.prestosql.spi.type.RowType; import io.prestosql.spi.type.Type; +import java.math.BigDecimal; +import java.math.BigInteger; import java.util.Map; +import static io.prestosql.spi.type.UnscaledDecimal128Arithmetic.UNSCALED_DECIMAL_128_SLICE_LENGTH; import static io.prestosql.testing.TestingConnectorSession.SESSION; import static org.testng.Assert.*; @@ -113,6 +118,21 @@ public void checkValue(Map decodedRow, assertEquals(provider.getBoolean(), value); } + public void checkValue(Map decodedRow, DecoderColumnHandle handle, BigDecimal value) { + FieldValueProvider provider = decodedRow.get(handle); + DecimalType decimalType = (DecimalType) handle.getType(); + BigDecimal actualDecimal; + if (decimalType.getFixedSize() == UNSCALED_DECIMAL_128_SLICE_LENGTH) { + Slice 
slice = provider.getSlice(); + BigInteger bigInteger = Decimals.decodeUnscaledValue(slice); + actualDecimal = new BigDecimal(bigInteger, decimalType.getScale()); + } else { + actualDecimal = BigDecimal.valueOf(provider.getLong(), decimalType.getScale()); + } + assertNotNull(provider); + assertEquals(actualDecimal, value); + } + public void checkIsNull(Map decodedRow, DecoderColumnHandle handle) { FieldValueProvider provider = decodedRow.get(handle); assertNotNull(provider); diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java index 1cfbbb4fce5e9..2478300dcaa16 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java @@ -25,11 +25,13 @@ import io.prestosql.spi.PrestoException; import io.prestosql.spi.type.ArrayType; import io.prestosql.spi.type.BigintType; +import io.prestosql.spi.type.DecimalType; import io.prestosql.spi.type.RowType; import io.prestosql.spi.type.StandardTypes; import io.prestosql.spi.type.Type; import io.prestosql.spi.type.TypeSignatureParameter; import io.prestosql.spi.type.VarcharType; +import java.math.BigDecimal; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; @@ -126,7 +128,25 @@ public void testPrimitiveType() { PulsarColumnHandle enumFieldColumnHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(), "enumField", VARCHAR, false, false, "enumField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE); checkValue(decodedRow, enumFieldColumnHandle, message.enumField.toString()); + } + + @Test + public void testDecimal() { + DecoderTestMessage message = new DecoderTestMessage(); + message.decimalField = BigDecimal.valueOf(2233, 2); + message.longDecimalField = new 
BigDecimal("1234567891234567891234567891.23"); + + ByteBuf payload = io.netty.buffer.Unpooled + .copiedBuffer(schema.encode(message)); + Map decodedRow = pulsarRowDecoder.decodeRow(payload).get(); + + PulsarColumnHandle decimalFieldColumnHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(), + "decimalField", DecimalType.createDecimalType(4, 2), false, false, "decimalField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE); + checkValue(decodedRow, decimalFieldColumnHandle, message.decimalField); + PulsarColumnHandle longDecimalFieldColumnHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(), + "longDecimalField", DecimalType.createDecimalType(30, 2), false, false, "longDecimalField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE); + checkValue(decodedRow, longDecimalFieldColumnHandle, message.longDecimalField); } @Test diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/json/TestJsonDecoder.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/json/TestJsonDecoder.java index 2a22b58a03fb2..0b8a8f84eda0e 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/json/TestJsonDecoder.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/json/TestJsonDecoder.java @@ -24,6 +24,7 @@ import io.prestosql.decoder.FieldValueProvider; import io.prestosql.spi.PrestoException; import io.prestosql.spi.type.*; +import java.math.BigDecimal; import org.apache.pulsar.client.impl.schema.JSONSchema; import org.apache.pulsar.client.impl.schema.generic.GenericJsonRecord; import org.apache.pulsar.client.impl.schema.generic.GenericJsonSchema; @@ -119,6 +120,25 @@ public void testPrimitiveType() { } + @Test + public void testDecimal() { + DecoderTestMessage message = new DecoderTestMessage(); + message.decimalField = BigDecimal.valueOf(2233, 2); + message.longDecimalField = new 
BigDecimal("1234567891234567891234567891.23"); + + ByteBuf payload = io.netty.buffer.Unpooled + .copiedBuffer(schema.encode(message)); + Map decodedRow = pulsarRowDecoder.decodeRow(payload).get(); + + PulsarColumnHandle decimalFieldColumnHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(), + "decimalField", DecimalType.createDecimalType(4, 2), false, false, "decimalField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE); + checkValue(decodedRow, decimalFieldColumnHandle, message.decimalField); + + PulsarColumnHandle longDecimalFieldColumnHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(), + "longDecimalField", DecimalType.createDecimalType(30, 2), false, false, "longDecimalField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE); + checkValue(decodedRow, longDecimalFieldColumnHandle, message.longDecimalField); + } + @Test public void testArray() { DecoderTestMessage message = new DecoderTestMessage(); diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/primitive/TestPrimitiveDecoder.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/primitive/TestPrimitiveDecoder.java index c1b97d3a32b22..97d4d055598b5 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/primitive/TestPrimitiveDecoder.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/primitive/TestPrimitiveDecoder.java @@ -66,7 +66,7 @@ public void init() { public void testPrimitiveType() { byte int8Value = 1; - SchemaInfo schemaInfoInt8 = SchemaInfoImpl.builder().type(SchemaType.INT8).build(); + SchemaInfo schemaInfoInt8 = SchemaInfo.builder().type(SchemaType.INT8).build(); Schema schemaInt8 = Schema.getSchema(schemaInfoInt8); List pulsarColumnHandleInt8 = getColumnColumnHandles(topicName, schemaInfoInt8, PulsarColumnHandle.HandleKeyValueType.NONE, false, decoderFactory); PulsarRowDecoder pulsarRowDecoderInt8 = 
decoderFactory.createRowDecoder(topicName, schemaInfoInt8, @@ -78,7 +78,7 @@ public void testPrimitiveType() { PRIMITIVE_COLUMN_NAME, TINYINT, false, false, PRIMITIVE_COLUMN_NAME, null, null, PulsarColumnHandle.HandleKeyValueType.NONE), int8Value); short int16Value = 2; - SchemaInfo schemaInfoInt16 = SchemaInfoImpl.builder().type(SchemaType.INT16).build(); + SchemaInfo schemaInfoInt16 = SchemaInfo.builder().type(SchemaType.INT16).build(); Schema schemaInt16 = Schema.getSchema(schemaInfoInt16); List pulsarColumnHandleInt16 = getColumnColumnHandles(topicName, schemaInfoInt16, PulsarColumnHandle.HandleKeyValueType.NONE, false, decoderFactory); PulsarRowDecoder pulsarRowDecoderInt16 = decoderFactory.createRowDecoder(topicName, schemaInfoInt16, @@ -90,7 +90,7 @@ public void testPrimitiveType() { PRIMITIVE_COLUMN_NAME, SMALLINT, false, false, PRIMITIVE_COLUMN_NAME, null, null, PulsarColumnHandle.HandleKeyValueType.NONE), int16Value); int int32Value = 2; - SchemaInfo schemaInfoInt32 = SchemaInfoImpl.builder().type(SchemaType.INT32).build(); + SchemaInfo schemaInfoInt32 = SchemaInfo.builder().type(SchemaType.INT32).build(); Schema schemaInt32 = Schema.getSchema(schemaInfoInt32); List pulsarColumnHandleInt32 = getColumnColumnHandles(topicName, schemaInfoInt32, PulsarColumnHandle.HandleKeyValueType.NONE, false, decoderFactory); @@ -103,7 +103,7 @@ public void testPrimitiveType() { PRIMITIVE_COLUMN_NAME, INTEGER, false, false, PRIMITIVE_COLUMN_NAME, null, null, PulsarColumnHandle.HandleKeyValueType.NONE), int32Value); long int64Value = 2; - SchemaInfo schemaInfoInt64 = SchemaInfoImpl.builder().type(SchemaType.INT64).build(); + SchemaInfo schemaInfoInt64 = SchemaInfo.builder().type(SchemaType.INT64).build(); Schema schemaInt64 = Schema.getSchema(schemaInfoInt64); List pulsarColumnHandleInt64 = getColumnColumnHandles(topicName, schemaInfoInt64, PulsarColumnHandle.HandleKeyValueType.NONE, false, decoderFactory); @@ -117,7 +117,7 @@ public void testPrimitiveType() { 
PulsarColumnHandle.HandleKeyValueType.NONE), int64Value); String stringValue = "test"; - SchemaInfo schemaInfoString = SchemaInfoImpl.builder().type(SchemaType.STRING).build(); + SchemaInfo schemaInfoString = SchemaInfo.builder().type(SchemaType.STRING).build(); Schema schemaString = Schema.getSchema(schemaInfoString); List pulsarColumnHandleString = getColumnColumnHandles(topicName, schemaInfoString, PulsarColumnHandle.HandleKeyValueType.NONE, false, decoderFactory); @@ -131,7 +131,7 @@ public void testPrimitiveType() { PulsarColumnHandle.HandleKeyValueType.NONE), stringValue); float floatValue = 0.2f; - SchemaInfo schemaInfoFloat = SchemaInfoImpl.builder().type(SchemaType.FLOAT).build(); + SchemaInfo schemaInfoFloat = SchemaInfo.builder().type(SchemaType.FLOAT).build(); Schema schemaFloat = Schema.getSchema(schemaInfoFloat); List pulsarColumnHandleFloat = getColumnColumnHandles(topicName, schemaInfoFloat, PulsarColumnHandle.HandleKeyValueType.NONE, false, decoderFactory); @@ -142,10 +142,10 @@ public void testPrimitiveType() { .copiedBuffer(schemaFloat.encode(floatValue))).get(); checkValue(decodedRowFloat, new PulsarColumnHandle(getPulsarConnectorId().toString(), PRIMITIVE_COLUMN_NAME, REAL, false, false, PRIMITIVE_COLUMN_NAME, null, null, - PulsarColumnHandle.HandleKeyValueType.NONE), Long.valueOf(Float.floatToIntBits(floatValue))); + PulsarColumnHandle.HandleKeyValueType.NONE), Float.floatToIntBits(floatValue)); double doubleValue = 0.22d; - SchemaInfo schemaInfoDouble = SchemaInfoImpl.builder().type(SchemaType.DOUBLE).build(); + SchemaInfo schemaInfoDouble = SchemaInfo.builder().type(SchemaType.DOUBLE).build(); Schema schemaDouble = Schema.getSchema(schemaInfoDouble); List pulsarColumnHandleDouble = getColumnColumnHandles(topicName, schemaInfoDouble, PulsarColumnHandle.HandleKeyValueType.NONE, false, decoderFactory); @@ -159,7 +159,7 @@ public void testPrimitiveType() { PulsarColumnHandle.HandleKeyValueType.NONE), doubleValue); boolean booleanValue = true; - 
SchemaInfo schemaInfoBoolean = SchemaInfoImpl.builder().type(SchemaType.BOOLEAN).build(); + SchemaInfo schemaInfoBoolean = SchemaInfo.builder().type(SchemaType.BOOLEAN).build(); Schema schemaBoolean = Schema.getSchema(schemaInfoBoolean); List pulsarColumnHandleBoolean = getColumnColumnHandles(topicName, schemaInfoBoolean, PulsarColumnHandle.HandleKeyValueType.NONE, false, decoderFactory); @@ -174,7 +174,7 @@ public void testPrimitiveType() { byte[] bytesValue = new byte[1]; bytesValue[0] = 1; - SchemaInfo schemaInfoBytes = SchemaInfoImpl.builder().type(SchemaType.BYTES).build(); + SchemaInfo schemaInfoBytes = SchemaInfo.builder().type(SchemaType.BYTES).build(); Schema schemaBytes = Schema.getSchema(schemaInfoBytes); List pulsarColumnHandleBytes = getColumnColumnHandles(topicName, schemaInfoBytes, PulsarColumnHandle.HandleKeyValueType.NONE, false, decoderFactory); @@ -188,7 +188,7 @@ public void testPrimitiveType() { PulsarColumnHandle.HandleKeyValueType.NONE), Slices.wrappedBuffer(bytesValue)); Date dateValue = new Date(System.currentTimeMillis()); - SchemaInfo schemaInfoDate = SchemaInfoImpl.builder().type(SchemaType.DATE).build(); + SchemaInfo schemaInfoDate = SchemaInfo.builder().type(SchemaType.DATE).build(); Schema schemaDate = Schema.getSchema(schemaInfoDate); List pulsarColumnHandleDate = getColumnColumnHandles(topicName, schemaInfoDate, PulsarColumnHandle.HandleKeyValueType.NONE, false, decoderFactory); @@ -202,7 +202,7 @@ public void testPrimitiveType() { PulsarColumnHandle.HandleKeyValueType.NONE), dateValue.getTime()); Time timeValue = new Time(System.currentTimeMillis()); - SchemaInfo schemaInfoTime = SchemaInfoImpl.builder().type(SchemaType.TIME).build(); + SchemaInfo schemaInfoTime = SchemaInfo.builder().type(SchemaType.TIME).build(); Schema schemaTime = Schema.getSchema(schemaInfoTime); List pulsarColumnHandleTime = getColumnColumnHandles(topicName, schemaInfoTime, PulsarColumnHandle.HandleKeyValueType.NONE, false, decoderFactory); @@ -216,7 +216,7 
@@ public void testPrimitiveType() { PulsarColumnHandle.HandleKeyValueType.NONE), timeValue.getTime()); Timestamp timestampValue = new Timestamp(System.currentTimeMillis()); - SchemaInfo schemaInfoTimestamp = SchemaInfoImpl.builder().type(SchemaType.TIMESTAMP).build(); + SchemaInfo schemaInfoTimestamp = SchemaInfo.builder().type(SchemaType.TIMESTAMP).build(); Schema schemaTimestamp = Schema.getSchema(schemaInfoTimestamp); List pulsarColumnHandleTimestamp = getColumnColumnHandles(topicName, schemaInfoTimestamp, PulsarColumnHandle.HandleKeyValueType.NONE, false, decoderFactory); diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/protobufnative/TestMsg.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/protobufnative/TestMsg.java index 3065caf02968e..401ded5bd85cc 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/protobufnative/TestMsg.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/protobufnative/TestMsg.java @@ -67,6 +67,8 @@ public final int getNumber() { } /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated @@ -74,6 +76,10 @@ public static TestEnum valueOf(int value) { return forNumber(value); } + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. 
+ */ public static TestEnum forNumber(int value) { switch (value) { case 0: return SHARED; @@ -96,6 +102,10 @@ public TestEnum findValueByNumber(int number) { public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } return getDescriptor().getValues().get(ordinal()); } public final com.google.protobuf.Descriptors.EnumDescriptor @@ -136,25 +146,30 @@ public interface SubMessageOrBuilder extends /** * string foo = 1; + * @return The foo. */ java.lang.String getFoo(); /** * string foo = 1; + * @return The bytes for foo. */ com.google.protobuf.ByteString getFooBytes(); /** * double bar = 2; + * @return The bar. */ double getBar(); /** * .proto.SubMessage.NestedMessage nestedMessage = 3; + * @return Whether the nestedMessage field is set. */ boolean hasNestedMessage(); /** * .proto.SubMessage.NestedMessage nestedMessage = 3; + * @return The nestedMessage. 
*/ org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage getNestedMessage(); /** @@ -165,7 +180,7 @@ public interface SubMessageOrBuilder extends /** * Protobuf type {@code proto.SubMessage} */ - public static final class SubMessage extends + public static final class SubMessage extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:proto.SubMessage) SubMessageOrBuilder { @@ -176,7 +191,13 @@ private SubMessage(com.google.protobuf.GeneratedMessageV3.Builder builder) { } private SubMessage() { foo_ = ""; - bar_ = 0D; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new SubMessage(); } @java.lang.Override @@ -192,7 +213,6 @@ private SubMessage( if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -203,13 +223,6 @@ private SubMessage( case 0: done = true; break; - default: { - if (!parseUnknownFieldProto3( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } case 10: { java.lang.String s = input.readStringRequireUtf8(); @@ -234,6 +247,13 @@ private SubMessage( break; } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -251,6 +271,7 @@ private SubMessage( return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_SubMessage_descriptor; } + @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_SubMessage_fieldAccessorTable @@ -264,29 +285,37 @@ public interface 
NestedMessageOrBuilder extends /** * string title = 1; + * @return The title. */ java.lang.String getTitle(); /** * string title = 1; + * @return The bytes for title. */ com.google.protobuf.ByteString getTitleBytes(); /** * repeated string urls = 2; + * @return A list containing the urls. */ java.util.List getUrlsList(); /** * repeated string urls = 2; + * @return The count of urls. */ int getUrlsCount(); /** * repeated string urls = 2; + * @param index The index of the element to return. + * @return The urls at the given index. */ java.lang.String getUrls(int index); /** * repeated string urls = 2; + * @param index The index of the value to return. + * @return The bytes of the urls at the given index. */ com.google.protobuf.ByteString getUrlsBytes(int index); @@ -294,7 +323,7 @@ public interface NestedMessageOrBuilder extends /** * Protobuf type {@code proto.SubMessage.NestedMessage} */ - public static final class NestedMessage extends + public static final class NestedMessage extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:proto.SubMessage.NestedMessage) NestedMessageOrBuilder { @@ -308,6 +337,13 @@ private NestedMessage() { urls_ = com.google.protobuf.LazyStringArrayList.EMPTY; } + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new NestedMessage(); + } + @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { @@ -332,13 +368,6 @@ private NestedMessage( case 0: done = true; break; - default: { - if (!parseUnknownFieldProto3( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } case 10: { java.lang.String s = input.readStringRequireUtf8(); @@ -347,13 +376,20 @@ private NestedMessage( } case 18: { java.lang.String s = input.readStringRequireUtf8(); - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { 
urls_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000002; + mutable_bitField0_ |= 0x00000001; } urls_.add(s); break; } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -362,7 +398,7 @@ private NestedMessage( throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + if (((mutable_bitField0_ & 0x00000001) != 0)) { urls_ = urls_.getUnmodifiableView(); } this.unknownFields = unknownFields.build(); @@ -374,6 +410,7 @@ private NestedMessage( return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_SubMessage_NestedMessage_descriptor; } + @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_SubMessage_NestedMessage_fieldAccessorTable @@ -381,12 +418,13 @@ private NestedMessage( org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage.class, org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage.Builder.class); } - private int bitField0_; public static final int TITLE_FIELD_NUMBER = 1; private volatile java.lang.Object title_; /** * string title = 1; + * @return The title. */ + @java.lang.Override public java.lang.String getTitle() { java.lang.Object ref = title_; if (ref instanceof java.lang.String) { @@ -401,7 +439,9 @@ public java.lang.String getTitle() { } /** * string title = 1; + * @return The bytes for title. 
*/ + @java.lang.Override public com.google.protobuf.ByteString getTitleBytes() { java.lang.Object ref = title_; @@ -420,6 +460,7 @@ public java.lang.String getTitle() { private com.google.protobuf.LazyStringList urls_; /** * repeated string urls = 2; + * @return A list containing the urls. */ public com.google.protobuf.ProtocolStringList getUrlsList() { @@ -427,18 +468,23 @@ public java.lang.String getTitle() { } /** * repeated string urls = 2; + * @return The count of urls. */ public int getUrlsCount() { return urls_.size(); } /** * repeated string urls = 2; + * @param index The index of the element to return. + * @return The urls at the given index. */ public java.lang.String getUrls(int index) { return urls_.get(index); } /** * repeated string urls = 2; + * @param index The index of the value to return. + * @return The bytes of the urls at the given index. */ public com.google.protobuf.ByteString getUrlsBytes(int index) { @@ -446,6 +492,7 @@ public java.lang.String getUrls(int index) { } private byte memoizedIsInitialized = -1; + @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -455,6 +502,7 @@ public final boolean isInitialized() { return true; } + @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (!getTitleBytes().isEmpty()) { @@ -466,6 +514,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) unknownFields.writeTo(output); } + @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; @@ -497,13 +546,12 @@ public boolean equals(final java.lang.Object obj) { } org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage other = (org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage) obj; - boolean result = true; - result = result && getTitle() - .equals(other.getTitle()); - result = 
result && getUrlsList() - .equals(other.getUrlsList()); - result = result && unknownFields.equals(other.unknownFields); - return result; + if (!getTitle() + .equals(other.getTitle())) return false; + if (!getUrlsList() + .equals(other.getUrlsList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; } @java.lang.Override @@ -594,6 +642,7 @@ public static org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMes .parseWithIOException(PARSER, input, extensionRegistry); } + @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); @@ -601,6 +650,7 @@ public static Builder newBuilder() { public static Builder newBuilder(org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } + @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); @@ -624,6 +674,7 @@ public static final class Builder extends return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_SubMessage_NestedMessage_descriptor; } + @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_SubMessage_NestedMessage_fieldAccessorTable @@ -646,24 +697,28 @@ private void maybeForceBuilderInitialization() { .alwaysUseFieldBuilders) { } } + @java.lang.Override public Builder clear() { super.clear(); title_ = ""; urls_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000001); return this; } + @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_SubMessage_NestedMessage_descriptor; } + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage getDefaultInstanceForType() { return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage.getDefaultInstance(); } + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage build() { org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage result = buildPartial(); if (!result.isInitialized()) { @@ -672,47 +727,53 @@ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.Ne return result; } + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage buildPartial() { org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage result = new 
org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage(this); int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; result.title_ = title_; - if (((bitField0_ & 0x00000002) == 0x00000002)) { + if (((bitField0_ & 0x00000001) != 0)) { urls_ = urls_.getUnmodifiableView(); - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000001); } result.urls_ = urls_; - result.bitField0_ = to_bitField0_; onBuilt(); return result; } + @java.lang.Override public Builder clone() { - return (Builder) super.clone(); + return super.clone(); } + @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return (Builder) super.setField(field, value); + return super.setField(field, value); } + @java.lang.Override public Builder clearField( com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); + return super.clearField(field); } + @java.lang.Override public Builder clearOneof( com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); + return super.clearOneof(oneof); } + @java.lang.Override public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { - return (Builder) super.setRepeatedField(field, index, value); + return super.setRepeatedField(field, index, value); } + @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); + return super.addRepeatedField(field, value); } + @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage) { return mergeFrom((org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage)other); @@ -731,7 
+792,7 @@ public Builder mergeFrom(org.apache.pulsar.sql.presto.decoder.protobufnative.Tes if (!other.urls_.isEmpty()) { if (urls_.isEmpty()) { urls_ = other.urls_; - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000001); } else { ensureUrlsIsMutable(); urls_.addAll(other.urls_); @@ -743,10 +804,12 @@ public Builder mergeFrom(org.apache.pulsar.sql.presto.decoder.protobufnative.Tes return this; } + @java.lang.Override public final boolean isInitialized() { return true; } + @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -769,6 +832,7 @@ public Builder mergeFrom( private java.lang.Object title_ = ""; /** * string title = 1; + * @return The title. */ public java.lang.String getTitle() { java.lang.Object ref = title_; @@ -784,6 +848,7 @@ public java.lang.String getTitle() { } /** * string title = 1; + * @return The bytes for title. */ public com.google.protobuf.ByteString getTitleBytes() { @@ -800,6 +865,8 @@ public java.lang.String getTitle() { } /** * string title = 1; + * @param value The title to set. + * @return This builder for chaining. */ public Builder setTitle( java.lang.String value) { @@ -813,6 +880,7 @@ public Builder setTitle( } /** * string title = 1; + * @return This builder for chaining. */ public Builder clearTitle() { @@ -822,6 +890,8 @@ public Builder clearTitle() { } /** * string title = 1; + * @param value The bytes for title to set. + * @return This builder for chaining. 
*/ public Builder setTitleBytes( com.google.protobuf.ByteString value) { @@ -837,13 +907,14 @@ public Builder setTitleBytes( private com.google.protobuf.LazyStringList urls_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureUrlsIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { + if (!((bitField0_ & 0x00000001) != 0)) { urls_ = new com.google.protobuf.LazyStringArrayList(urls_); - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000001; } } /** * repeated string urls = 2; + * @return A list containing the urls. */ public com.google.protobuf.ProtocolStringList getUrlsList() { @@ -851,18 +922,23 @@ private void ensureUrlsIsMutable() { } /** * repeated string urls = 2; + * @return The count of urls. */ public int getUrlsCount() { return urls_.size(); } /** * repeated string urls = 2; + * @param index The index of the element to return. + * @return The urls at the given index. */ public java.lang.String getUrls(int index) { return urls_.get(index); } /** * repeated string urls = 2; + * @param index The index of the value to return. + * @return The bytes of the urls at the given index. */ public com.google.protobuf.ByteString getUrlsBytes(int index) { @@ -870,6 +946,9 @@ public java.lang.String getUrls(int index) { } /** * repeated string urls = 2; + * @param index The index to set the value at. + * @param value The urls to set. + * @return This builder for chaining. */ public Builder setUrls( int index, java.lang.String value) { @@ -883,6 +962,8 @@ public Builder setUrls( } /** * repeated string urls = 2; + * @param value The urls to add. + * @return This builder for chaining. */ public Builder addUrls( java.lang.String value) { @@ -896,6 +977,8 @@ public Builder addUrls( } /** * repeated string urls = 2; + * @param values The urls to add. + * @return This builder for chaining. 
*/ public Builder addAllUrls( java.lang.Iterable values) { @@ -907,15 +990,18 @@ public Builder addAllUrls( } /** * repeated string urls = 2; + * @return This builder for chaining. */ public Builder clearUrls() { urls_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * repeated string urls = 2; + * @param value The bytes of the urls to add. + * @return This builder for chaining. */ public Builder addUrlsBytes( com.google.protobuf.ByteString value) { @@ -928,11 +1014,13 @@ public Builder addUrlsBytes( onChanged(); return this; } + @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFieldsProto3(unknownFields); + return super.setUnknownFields(unknownFields); } + @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); @@ -954,6 +1042,7 @@ public static org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMes private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override public NestedMessage parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -971,6 +1060,7 @@ public com.google.protobuf.Parser getParserForType() { return PARSER; } + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage getDefaultInstanceForType() { return DEFAULT_INSTANCE; } @@ -981,7 +1071,9 @@ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.Ne private volatile java.lang.Object foo_; /** * string foo = 1; + * @return The foo. 
*/ + @java.lang.Override public java.lang.String getFoo() { java.lang.Object ref = foo_; if (ref instanceof java.lang.String) { @@ -996,7 +1088,9 @@ public java.lang.String getFoo() { } /** * string foo = 1; + * @return The bytes for foo. */ + @java.lang.Override public com.google.protobuf.ByteString getFooBytes() { java.lang.Object ref = foo_; @@ -1015,7 +1109,9 @@ public java.lang.String getFoo() { private double bar_; /** * double bar = 2; + * @return The bar. */ + @java.lang.Override public double getBar() { return bar_; } @@ -1024,24 +1120,30 @@ public double getBar() { private org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage nestedMessage_; /** * .proto.SubMessage.NestedMessage nestedMessage = 3; + * @return Whether the nestedMessage field is set. */ + @java.lang.Override public boolean hasNestedMessage() { return nestedMessage_ != null; } /** * .proto.SubMessage.NestedMessage nestedMessage = 3; + * @return The nestedMessage. */ + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage getNestedMessage() { return nestedMessage_ == null ? 
org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage.getDefaultInstance() : nestedMessage_; } /** * .proto.SubMessage.NestedMessage nestedMessage = 3; */ + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessageOrBuilder getNestedMessageOrBuilder() { return getNestedMessage(); } private byte memoizedIsInitialized = -1; + @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -1051,6 +1153,7 @@ public final boolean isInitialized() { return true; } + @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (!getFooBytes().isEmpty()) { @@ -1065,6 +1168,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) unknownFields.writeTo(output); } + @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; @@ -1096,20 +1200,18 @@ public boolean equals(final java.lang.Object obj) { } org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage other = (org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage) obj; - boolean result = true; - result = result && getFoo() - .equals(other.getFoo()); - result = result && ( - java.lang.Double.doubleToLongBits(getBar()) - == java.lang.Double.doubleToLongBits( - other.getBar())); - result = result && (hasNestedMessage() == other.hasNestedMessage()); + if (!getFoo() + .equals(other.getFoo())) return false; + if (java.lang.Double.doubleToLongBits(getBar()) + != java.lang.Double.doubleToLongBits( + other.getBar())) return false; + if (hasNestedMessage() != other.hasNestedMessage()) return false; if (hasNestedMessage()) { - result = result && getNestedMessage() - .equals(other.getNestedMessage()); + if (!getNestedMessage() + .equals(other.getNestedMessage())) return false; } - result = result && 
unknownFields.equals(other.unknownFields); - return result; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; } @java.lang.Override @@ -1203,6 +1305,7 @@ public static org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMes .parseWithIOException(PARSER, input, extensionRegistry); } + @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); @@ -1210,6 +1313,7 @@ public static Builder newBuilder() { public static Builder newBuilder(org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } + @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); @@ -1233,6 +1337,7 @@ public static final class Builder extends return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_SubMessage_descriptor; } + @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_SubMessage_fieldAccessorTable @@ -1255,6 +1360,7 @@ private void maybeForceBuilderInitialization() { .alwaysUseFieldBuilders) { } } + @java.lang.Override public Builder clear() { super.clear(); foo_ = ""; @@ -1270,15 +1376,18 @@ public Builder clear() { return this; } + @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_SubMessage_descriptor; } + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage getDefaultInstanceForType() { return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.getDefaultInstance(); } + @java.lang.Override public 
org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage build() { org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage result = buildPartial(); if (!result.isInitialized()) { @@ -1287,6 +1396,7 @@ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage bu return result; } + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage buildPartial() { org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage result = new org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage(this); result.foo_ = foo_; @@ -1300,32 +1410,39 @@ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage bu return result; } + @java.lang.Override public Builder clone() { - return (Builder) super.clone(); + return super.clone(); } + @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return (Builder) super.setField(field, value); + return super.setField(field, value); } + @java.lang.Override public Builder clearField( com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); + return super.clearField(field); } + @java.lang.Override public Builder clearOneof( com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); + return super.clearOneof(oneof); } + @java.lang.Override public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { - return (Builder) super.setRepeatedField(field, index, value); + return super.setRepeatedField(field, index, value); } + @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); + return super.addRepeatedField(field, value); } + @java.lang.Override public Builder 
mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage) { return mergeFrom((org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage)other); @@ -1352,10 +1469,12 @@ public Builder mergeFrom(org.apache.pulsar.sql.presto.decoder.protobufnative.Tes return this; } + @java.lang.Override public final boolean isInitialized() { return true; } + @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -1377,6 +1496,7 @@ public Builder mergeFrom( private java.lang.Object foo_ = ""; /** * string foo = 1; + * @return The foo. */ public java.lang.String getFoo() { java.lang.Object ref = foo_; @@ -1392,6 +1512,7 @@ public java.lang.String getFoo() { } /** * string foo = 1; + * @return The bytes for foo. */ public com.google.protobuf.ByteString getFooBytes() { @@ -1408,6 +1529,8 @@ public java.lang.String getFoo() { } /** * string foo = 1; + * @param value The foo to set. + * @return This builder for chaining. */ public Builder setFoo( java.lang.String value) { @@ -1421,6 +1544,7 @@ public Builder setFoo( } /** * string foo = 1; + * @return This builder for chaining. */ public Builder clearFoo() { @@ -1430,6 +1554,8 @@ public Builder clearFoo() { } /** * string foo = 1; + * @param value The bytes for foo to set. + * @return This builder for chaining. */ public Builder setFooBytes( com.google.protobuf.ByteString value) { @@ -1446,12 +1572,16 @@ public Builder setFooBytes( private double bar_ ; /** * double bar = 2; + * @return The bar. */ + @java.lang.Override public double getBar() { return bar_; } /** * double bar = 2; + * @param value The bar to set. + * @return This builder for chaining. */ public Builder setBar(double value) { @@ -1461,6 +1591,7 @@ public Builder setBar(double value) { } /** * double bar = 2; + * @return This builder for chaining. 
*/ public Builder clearBar() { @@ -1469,17 +1600,19 @@ public Builder clearBar() { return this; } - private org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage nestedMessage_ = null; + private org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage nestedMessage_; private com.google.protobuf.SingleFieldBuilderV3< org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage, org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage.Builder, org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessageOrBuilder> nestedMessageBuilder_; /** * .proto.SubMessage.NestedMessage nestedMessage = 3; + * @return Whether the nestedMessage field is set. */ public boolean hasNestedMessage() { return nestedMessageBuilder_ != null || nestedMessage_ != null; } /** * .proto.SubMessage.NestedMessage nestedMessage = 3; + * @return The nestedMessage. */ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.NestedMessage getNestedMessage() { if (nestedMessageBuilder_ == null) { @@ -1585,11 +1718,13 @@ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.Ne } return nestedMessageBuilder_; } + @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFieldsProto3(unknownFields); + return super.setUnknownFields(unknownFields); } + @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); @@ -1611,6 +1746,7 @@ public static org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMes private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override public SubMessage parsePartialFrom( com.google.protobuf.CodedInputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -1628,6 +1764,7 @@ public com.google.protobuf.Parser getParserForType() { return PARSER; } + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage getDefaultInstanceForType() { return DEFAULT_INSTANCE; } @@ -1640,99 +1777,119 @@ public interface TestMessageOrBuilder extends /** * string stringField = 1; + * @return The stringField. */ java.lang.String getStringField(); /** * string stringField = 1; + * @return The bytes for stringField. */ com.google.protobuf.ByteString getStringFieldBytes(); /** * double doubleField = 2; + * @return The doubleField. */ double getDoubleField(); /** * float floatField = 3; + * @return The floatField. */ float getFloatField(); /** * int32 int32Field = 4; + * @return The int32Field. */ int getInt32Field(); /** * int64 int64Field = 5; + * @return The int64Field. */ long getInt64Field(); /** * uint32 uint32Field = 6; + * @return The uint32Field. */ int getUint32Field(); /** * uint64 uint64Field = 7; + * @return The uint64Field. */ long getUint64Field(); /** * sint32 sint32Field = 8; + * @return The sint32Field. */ int getSint32Field(); /** * sint64 sint64Field = 9; + * @return The sint64Field. */ long getSint64Field(); /** * fixed32 fixed32Field = 10; + * @return The fixed32Field. */ int getFixed32Field(); /** * fixed64 fixed64Field = 11; + * @return The fixed64Field. */ long getFixed64Field(); /** * sfixed32 sfixed32Field = 12; + * @return The sfixed32Field. */ int getSfixed32Field(); /** * sfixed64 sfixed64Field = 13; + * @return The sfixed64Field. */ long getSfixed64Field(); /** * bool boolField = 14; + * @return The boolField. */ boolean getBoolField(); /** * bytes bytesField = 15; + * @return The bytesField. */ com.google.protobuf.ByteString getBytesField(); /** * .proto.TestEnum testEnum = 16; + * @return The enum numeric value on the wire for testEnum. 
*/ int getTestEnumValue(); /** * .proto.TestEnum testEnum = 16; + * @return The testEnum. */ org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestEnum getTestEnum(); /** * .proto.SubMessage subMessage = 17; + * @return Whether the subMessage field is set. */ boolean hasSubMessage(); /** * .proto.SubMessage subMessage = 17; + * @return The subMessage. */ org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage getSubMessage(); /** @@ -1742,19 +1899,25 @@ public interface TestMessageOrBuilder extends /** * repeated string repeatedField = 18; + * @return A list containing the repeatedField. */ java.util.List getRepeatedFieldList(); /** * repeated string repeatedField = 18; + * @return The count of repeatedField. */ int getRepeatedFieldCount(); /** * repeated string repeatedField = 18; + * @param index The index of the element to return. + * @return The repeatedField at the given index. */ java.lang.String getRepeatedField(int index); /** * repeated string repeatedField = 18; + * @param index The index of the value to return. + * @return The bytes of the repeatedField at the given index. */ com.google.protobuf.ByteString getRepeatedFieldBytes(int index); @@ -1792,11 +1955,26 @@ boolean containsMapField( double getMapFieldOrThrow( java.lang.String key); + + /** + * .google.protobuf.Timestamp timestampField = 20; + * @return Whether the timestampField field is set. + */ + boolean hasTimestampField(); + /** + * .google.protobuf.Timestamp timestampField = 20; + * @return The timestampField. 
+ */ + com.google.protobuf.Timestamp getTimestampField(); + /** + * .google.protobuf.Timestamp timestampField = 20; + */ + com.google.protobuf.TimestampOrBuilder getTimestampFieldOrBuilder(); } /** * Protobuf type {@code proto.TestMessage} */ - public static final class TestMessage extends + public static final class TestMessage extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:proto.TestMessage) TestMessageOrBuilder { @@ -1807,24 +1985,18 @@ private TestMessage(com.google.protobuf.GeneratedMessageV3.Builder builder) { } private TestMessage() { stringField_ = ""; - doubleField_ = 0D; - floatField_ = 0F; - int32Field_ = 0; - int64Field_ = 0L; - uint32Field_ = 0; - uint64Field_ = 0L; - sint32Field_ = 0; - sint64Field_ = 0L; - fixed32Field_ = 0; - fixed64Field_ = 0L; - sfixed32Field_ = 0; - sfixed64Field_ = 0L; - boolField_ = false; bytesField_ = com.google.protobuf.ByteString.EMPTY; testEnum_ = 0; repeatedField_ = com.google.protobuf.LazyStringArrayList.EMPTY; } + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new TestMessage(); + } + @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { @@ -1849,13 +2021,6 @@ private TestMessage( case 0: done = true; break; - default: { - if (!parseUnknownFieldProto3( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } case 10: { java.lang.String s = input.readStringRequireUtf8(); @@ -1953,18 +2118,18 @@ private TestMessage( } case 146: { java.lang.String s = input.readStringRequireUtf8(); - if (!((mutable_bitField0_ & 0x00020000) == 0x00020000)) { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { repeatedField_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00020000; + mutable_bitField0_ |= 0x00000001; } repeatedField_.add(s); break; } case 154: { - if (!((mutable_bitField0_ & 0x00040000) == 
0x00040000)) { + if (!((mutable_bitField0_ & 0x00000002) != 0)) { mapField_ = com.google.protobuf.MapField.newMapField( MapFieldDefaultEntryHolder.defaultEntry); - mutable_bitField0_ |= 0x00040000; + mutable_bitField0_ |= 0x00000002; } com.google.protobuf.MapEntry mapField__ = input.readMessage( @@ -1973,6 +2138,26 @@ private TestMessage( mapField__.getKey(), mapField__.getValue()); break; } + case 162: { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (timestampField_ != null) { + subBuilder = timestampField_.toBuilder(); + } + timestampField_ = input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(timestampField_); + timestampField_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -1981,7 +2166,7 @@ private TestMessage( throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00020000) == 0x00020000)) { + if (((mutable_bitField0_ & 0x00000001) != 0)) { repeatedField_ = repeatedField_.getUnmodifiableView(); } this.unknownFields = unknownFields.build(); @@ -1994,6 +2179,7 @@ private TestMessage( } @SuppressWarnings({"rawtypes"}) + @java.lang.Override protected com.google.protobuf.MapField internalGetMapField( int number) { switch (number) { @@ -2004,6 +2190,7 @@ protected com.google.protobuf.MapField internalGetMapField( "Invalid map field number: " + number); } } + @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_TestMessage_fieldAccessorTable @@ -2011,12 +2198,13 @@ protected com.google.protobuf.MapField internalGetMapField( 
org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage.class, org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage.Builder.class); } - private int bitField0_; public static final int STRINGFIELD_FIELD_NUMBER = 1; private volatile java.lang.Object stringField_; /** * string stringField = 1; + * @return The stringField. */ + @java.lang.Override public java.lang.String getStringField() { java.lang.Object ref = stringField_; if (ref instanceof java.lang.String) { @@ -2031,7 +2219,9 @@ public java.lang.String getStringField() { } /** * string stringField = 1; + * @return The bytes for stringField. */ + @java.lang.Override public com.google.protobuf.ByteString getStringFieldBytes() { java.lang.Object ref = stringField_; @@ -2050,7 +2240,9 @@ public java.lang.String getStringField() { private double doubleField_; /** * double doubleField = 2; + * @return The doubleField. */ + @java.lang.Override public double getDoubleField() { return doubleField_; } @@ -2059,7 +2251,9 @@ public double getDoubleField() { private float floatField_; /** * float floatField = 3; + * @return The floatField. */ + @java.lang.Override public float getFloatField() { return floatField_; } @@ -2068,7 +2262,9 @@ public float getFloatField() { private int int32Field_; /** * int32 int32Field = 4; + * @return The int32Field. */ + @java.lang.Override public int getInt32Field() { return int32Field_; } @@ -2077,7 +2273,9 @@ public int getInt32Field() { private long int64Field_; /** * int64 int64Field = 5; + * @return The int64Field. */ + @java.lang.Override public long getInt64Field() { return int64Field_; } @@ -2086,7 +2284,9 @@ public long getInt64Field() { private int uint32Field_; /** * uint32 uint32Field = 6; + * @return The uint32Field. */ + @java.lang.Override public int getUint32Field() { return uint32Field_; } @@ -2095,7 +2295,9 @@ public int getUint32Field() { private long uint64Field_; /** * uint64 uint64Field = 7; + * @return The uint64Field. 
*/ + @java.lang.Override public long getUint64Field() { return uint64Field_; } @@ -2104,7 +2306,9 @@ public long getUint64Field() { private int sint32Field_; /** * sint32 sint32Field = 8; + * @return The sint32Field. */ + @java.lang.Override public int getSint32Field() { return sint32Field_; } @@ -2113,7 +2317,9 @@ public int getSint32Field() { private long sint64Field_; /** * sint64 sint64Field = 9; + * @return The sint64Field. */ + @java.lang.Override public long getSint64Field() { return sint64Field_; } @@ -2122,7 +2328,9 @@ public long getSint64Field() { private int fixed32Field_; /** * fixed32 fixed32Field = 10; + * @return The fixed32Field. */ + @java.lang.Override public int getFixed32Field() { return fixed32Field_; } @@ -2131,7 +2339,9 @@ public int getFixed32Field() { private long fixed64Field_; /** * fixed64 fixed64Field = 11; + * @return The fixed64Field. */ + @java.lang.Override public long getFixed64Field() { return fixed64Field_; } @@ -2140,7 +2350,9 @@ public long getFixed64Field() { private int sfixed32Field_; /** * sfixed32 sfixed32Field = 12; + * @return The sfixed32Field. */ + @java.lang.Override public int getSfixed32Field() { return sfixed32Field_; } @@ -2149,7 +2361,9 @@ public int getSfixed32Field() { private long sfixed64Field_; /** * sfixed64 sfixed64Field = 13; + * @return The sfixed64Field. */ + @java.lang.Override public long getSfixed64Field() { return sfixed64Field_; } @@ -2158,7 +2372,9 @@ public long getSfixed64Field() { private boolean boolField_; /** * bool boolField = 14; + * @return The boolField. */ + @java.lang.Override public boolean getBoolField() { return boolField_; } @@ -2167,7 +2383,9 @@ public boolean getBoolField() { private com.google.protobuf.ByteString bytesField_; /** * bytes bytesField = 15; + * @return The bytesField. 
*/ + @java.lang.Override public com.google.protobuf.ByteString getBytesField() { return bytesField_; } @@ -2176,14 +2394,17 @@ public com.google.protobuf.ByteString getBytesField() { private int testEnum_; /** * .proto.TestEnum testEnum = 16; + * @return The enum numeric value on the wire for testEnum. */ - public int getTestEnumValue() { + @java.lang.Override public int getTestEnumValue() { return testEnum_; } /** * .proto.TestEnum testEnum = 16; + * @return The testEnum. */ - public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestEnum getTestEnum() { + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestEnum getTestEnum() { + @SuppressWarnings("deprecation") org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestEnum result = org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestEnum.valueOf(testEnum_); return result == null ? org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestEnum.UNRECOGNIZED : result; } @@ -2192,19 +2413,24 @@ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestEnum getT private org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage subMessage_; /** * .proto.SubMessage subMessage = 17; + * @return Whether the subMessage field is set. */ + @java.lang.Override public boolean hasSubMessage() { return subMessage_ != null; } /** * .proto.SubMessage subMessage = 17; + * @return The subMessage. */ + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage getSubMessage() { return subMessage_ == null ? 
org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.getDefaultInstance() : subMessage_; } /** * .proto.SubMessage subMessage = 17; */ + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessageOrBuilder getSubMessageOrBuilder() { return getSubMessage(); } @@ -2213,6 +2439,7 @@ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessageOrB private com.google.protobuf.LazyStringList repeatedField_; /** * repeated string repeatedField = 18; + * @return A list containing the repeatedField. */ public com.google.protobuf.ProtocolStringList getRepeatedFieldList() { @@ -2220,18 +2447,23 @@ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessageOrB } /** * repeated string repeatedField = 18; + * @return The count of repeatedField. */ public int getRepeatedFieldCount() { return repeatedField_.size(); } /** * repeated string repeatedField = 18; + * @param index The index of the element to return. + * @return The repeatedField at the given index. */ public java.lang.String getRepeatedField(int index) { return repeatedField_.get(index); } /** * repeated string repeatedField = 18; + * @param index The index of the value to return. + * @return The bytes of the repeatedField at the given index. */ public com.google.protobuf.ByteString getRepeatedFieldBytes(int index) { @@ -2268,6 +2500,7 @@ public int getMapFieldCount() { * map<string, double> mapField = 19; */ + @java.lang.Override public boolean containsMapField( java.lang.String key) { if (key == null) { throw new java.lang.NullPointerException(); } @@ -2276,6 +2509,7 @@ public boolean containsMapField( /** * Use {@link #getMapFieldMap()} instead. 
*/ + @java.lang.Override @java.lang.Deprecated public java.util.Map getMapField() { return getMapFieldMap(); @@ -2283,6 +2517,7 @@ public java.util.Map getMapField() { /** * map<string, double> mapField = 19; */ + @java.lang.Override public java.util.Map getMapFieldMap() { return internalGetMapField().getMap(); @@ -2290,6 +2525,7 @@ public java.util.Map getMapFieldMap() { /** * map<string, double> mapField = 19; */ + @java.lang.Override public double getMapFieldOrDefault( java.lang.String key, @@ -2302,6 +2538,7 @@ public double getMapFieldOrDefault( /** * map<string, double> mapField = 19; */ + @java.lang.Override public double getMapFieldOrThrow( java.lang.String key) { @@ -2314,7 +2551,34 @@ public double getMapFieldOrThrow( return map.get(key); } + public static final int TIMESTAMPFIELD_FIELD_NUMBER = 20; + private com.google.protobuf.Timestamp timestampField_; + /** + * .google.protobuf.Timestamp timestampField = 20; + * @return Whether the timestampField field is set. + */ + @java.lang.Override + public boolean hasTimestampField() { + return timestampField_ != null; + } + /** + * .google.protobuf.Timestamp timestampField = 20; + * @return The timestampField. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getTimestampField() { + return timestampField_ == null ? 
com.google.protobuf.Timestamp.getDefaultInstance() : timestampField_; + } + /** + * .google.protobuf.Timestamp timestampField = 20; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getTimestampFieldOrBuilder() { + return getTimestampField(); + } + private byte memoizedIsInitialized = -1; + @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -2324,6 +2588,7 @@ public final boolean isInitialized() { return true; } + @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (!getStringFieldBytes().isEmpty()) { @@ -2386,9 +2651,13 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) internalGetMapField(), MapFieldDefaultEntryHolder.defaultEntry, 19); + if (timestampField_ != null) { + output.writeMessage(20, getTimestampField()); + } unknownFields.writeTo(output); } + @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; @@ -2479,6 +2748,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(19, mapField__); } + if (timestampField_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(20, getTimestampField()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -2494,53 +2767,55 @@ public boolean equals(final java.lang.Object obj) { } org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage other = (org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage) obj; - boolean result = true; - result = result && getStringField() - .equals(other.getStringField()); - result = result && ( - java.lang.Double.doubleToLongBits(getDoubleField()) - == java.lang.Double.doubleToLongBits( - other.getDoubleField())); - result = result && ( - java.lang.Float.floatToIntBits(getFloatField()) - == 
java.lang.Float.floatToIntBits( - other.getFloatField())); - result = result && (getInt32Field() - == other.getInt32Field()); - result = result && (getInt64Field() - == other.getInt64Field()); - result = result && (getUint32Field() - == other.getUint32Field()); - result = result && (getUint64Field() - == other.getUint64Field()); - result = result && (getSint32Field() - == other.getSint32Field()); - result = result && (getSint64Field() - == other.getSint64Field()); - result = result && (getFixed32Field() - == other.getFixed32Field()); - result = result && (getFixed64Field() - == other.getFixed64Field()); - result = result && (getSfixed32Field() - == other.getSfixed32Field()); - result = result && (getSfixed64Field() - == other.getSfixed64Field()); - result = result && (getBoolField() - == other.getBoolField()); - result = result && getBytesField() - .equals(other.getBytesField()); - result = result && testEnum_ == other.testEnum_; - result = result && (hasSubMessage() == other.hasSubMessage()); + if (!getStringField() + .equals(other.getStringField())) return false; + if (java.lang.Double.doubleToLongBits(getDoubleField()) + != java.lang.Double.doubleToLongBits( + other.getDoubleField())) return false; + if (java.lang.Float.floatToIntBits(getFloatField()) + != java.lang.Float.floatToIntBits( + other.getFloatField())) return false; + if (getInt32Field() + != other.getInt32Field()) return false; + if (getInt64Field() + != other.getInt64Field()) return false; + if (getUint32Field() + != other.getUint32Field()) return false; + if (getUint64Field() + != other.getUint64Field()) return false; + if (getSint32Field() + != other.getSint32Field()) return false; + if (getSint64Field() + != other.getSint64Field()) return false; + if (getFixed32Field() + != other.getFixed32Field()) return false; + if (getFixed64Field() + != other.getFixed64Field()) return false; + if (getSfixed32Field() + != other.getSfixed32Field()) return false; + if (getSfixed64Field() + != 
other.getSfixed64Field()) return false; + if (getBoolField() + != other.getBoolField()) return false; + if (!getBytesField() + .equals(other.getBytesField())) return false; + if (testEnum_ != other.testEnum_) return false; + if (hasSubMessage() != other.hasSubMessage()) return false; if (hasSubMessage()) { - result = result && getSubMessage() - .equals(other.getSubMessage()); - } - result = result && getRepeatedFieldList() - .equals(other.getRepeatedFieldList()); - result = result && internalGetMapField().equals( - other.internalGetMapField()); - result = result && unknownFields.equals(other.unknownFields); - return result; + if (!getSubMessage() + .equals(other.getSubMessage())) return false; + } + if (!getRepeatedFieldList() + .equals(other.getRepeatedFieldList())) return false; + if (!internalGetMapField().equals( + other.internalGetMapField())) return false; + if (hasTimestampField() != other.hasTimestampField()) return false; + if (hasTimestampField()) { + if (!getTimestampField() + .equals(other.getTimestampField())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; } @java.lang.Override @@ -2602,6 +2877,10 @@ public int hashCode() { hash = (37 * hash) + MAPFIELD_FIELD_NUMBER; hash = (53 * hash) + internalGetMapField().hashCode(); } + if (hasTimestampField()) { + hash = (37 * hash) + TIMESTAMPFIELD_FIELD_NUMBER; + hash = (53 * hash) + getTimestampField().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -2677,6 +2956,7 @@ public static org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMe .parseWithIOException(PARSER, input, extensionRegistry); } + @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); @@ -2684,6 +2964,7 @@ public static Builder newBuilder() { public static Builder 
newBuilder(org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } + @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); @@ -2729,6 +3010,7 @@ protected com.google.protobuf.MapField internalGetMutableMapField( "Invalid map field number: " + number); } } + @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_TestMessage_fieldAccessorTable @@ -2751,6 +3033,7 @@ private void maybeForceBuilderInitialization() { .alwaysUseFieldBuilders) { } } + @java.lang.Override public Builder clear() { super.clear(); stringField_ = ""; @@ -2792,20 +3075,29 @@ public Builder clear() { subMessageBuilder_ = null; } repeatedField_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00020000); + bitField0_ = (bitField0_ & ~0x00000001); internalGetMutableMapField().clear(); + if (timestampFieldBuilder_ == null) { + timestampField_ = null; + } else { + timestampField_ = null; + timestampFieldBuilder_ = null; + } return this; } + @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.internal_static_proto_TestMessage_descriptor; } + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage getDefaultInstanceForType() { return org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage.getDefaultInstance(); } + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage build() { org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage result = buildPartial(); if (!result.isInitialized()) { @@ -2814,10 +3106,10 @@ public 
org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage b return result; } + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage buildPartial() { org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage result = new org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage(this); int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; result.stringField_ = stringField_; result.doubleField_ = doubleField_; result.floatField_ = floatField_; @@ -2839,44 +3131,55 @@ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage b } else { result.subMessage_ = subMessageBuilder_.build(); } - if (((bitField0_ & 0x00020000) == 0x00020000)) { + if (((bitField0_ & 0x00000001) != 0)) { repeatedField_ = repeatedField_.getUnmodifiableView(); - bitField0_ = (bitField0_ & ~0x00020000); + bitField0_ = (bitField0_ & ~0x00000001); } result.repeatedField_ = repeatedField_; result.mapField_ = internalGetMapField(); result.mapField_.makeImmutable(); - result.bitField0_ = to_bitField0_; + if (timestampFieldBuilder_ == null) { + result.timestampField_ = timestampField_; + } else { + result.timestampField_ = timestampFieldBuilder_.build(); + } onBuilt(); return result; } + @java.lang.Override public Builder clone() { - return (Builder) super.clone(); + return super.clone(); } + @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return (Builder) super.setField(field, value); + return super.setField(field, value); } + @java.lang.Override public Builder clearField( com.google.protobuf.Descriptors.FieldDescriptor field) { - return (Builder) super.clearField(field); + return super.clearField(field); } + @java.lang.Override public Builder clearOneof( com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return (Builder) super.clearOneof(oneof); + return super.clearOneof(oneof); } + @java.lang.Override 
public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { - return (Builder) super.setRepeatedField(field, index, value); + return super.setRepeatedField(field, index, value); } + @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return (Builder) super.addRepeatedField(field, value); + return super.addRepeatedField(field, value); } + @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage) { return mergeFrom((org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage)other); @@ -2943,7 +3246,7 @@ public Builder mergeFrom(org.apache.pulsar.sql.presto.decoder.protobufnative.Tes if (!other.repeatedField_.isEmpty()) { if (repeatedField_.isEmpty()) { repeatedField_ = other.repeatedField_; - bitField0_ = (bitField0_ & ~0x00020000); + bitField0_ = (bitField0_ & ~0x00000001); } else { ensureRepeatedFieldIsMutable(); repeatedField_.addAll(other.repeatedField_); @@ -2952,15 +3255,20 @@ public Builder mergeFrom(org.apache.pulsar.sql.presto.decoder.protobufnative.Tes } internalGetMutableMapField().mergeFrom( other.internalGetMapField()); + if (other.hasTimestampField()) { + mergeTimestampField(other.getTimestampField()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } + @java.lang.Override public final boolean isInitialized() { return true; } + @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -2983,6 +3291,7 @@ public Builder mergeFrom( private java.lang.Object stringField_ = ""; /** * string stringField = 1; + * @return The stringField. 
*/ public java.lang.String getStringField() { java.lang.Object ref = stringField_; @@ -2998,6 +3307,7 @@ public java.lang.String getStringField() { } /** * string stringField = 1; + * @return The bytes for stringField. */ public com.google.protobuf.ByteString getStringFieldBytes() { @@ -3014,6 +3324,8 @@ public java.lang.String getStringField() { } /** * string stringField = 1; + * @param value The stringField to set. + * @return This builder for chaining. */ public Builder setStringField( java.lang.String value) { @@ -3027,6 +3339,7 @@ public Builder setStringField( } /** * string stringField = 1; + * @return This builder for chaining. */ public Builder clearStringField() { @@ -3036,6 +3349,8 @@ public Builder clearStringField() { } /** * string stringField = 1; + * @param value The bytes for stringField to set. + * @return This builder for chaining. */ public Builder setStringFieldBytes( com.google.protobuf.ByteString value) { @@ -3052,12 +3367,16 @@ public Builder setStringFieldBytes( private double doubleField_ ; /** * double doubleField = 2; + * @return The doubleField. */ + @java.lang.Override public double getDoubleField() { return doubleField_; } /** * double doubleField = 2; + * @param value The doubleField to set. + * @return This builder for chaining. */ public Builder setDoubleField(double value) { @@ -3067,6 +3386,7 @@ public Builder setDoubleField(double value) { } /** * double doubleField = 2; + * @return This builder for chaining. */ public Builder clearDoubleField() { @@ -3078,12 +3398,16 @@ public Builder clearDoubleField() { private float floatField_ ; /** * float floatField = 3; + * @return The floatField. */ + @java.lang.Override public float getFloatField() { return floatField_; } /** * float floatField = 3; + * @param value The floatField to set. + * @return This builder for chaining. 
*/ public Builder setFloatField(float value) { @@ -3093,6 +3417,7 @@ public Builder setFloatField(float value) { } /** * float floatField = 3; + * @return This builder for chaining. */ public Builder clearFloatField() { @@ -3104,12 +3429,16 @@ public Builder clearFloatField() { private int int32Field_ ; /** * int32 int32Field = 4; + * @return The int32Field. */ + @java.lang.Override public int getInt32Field() { return int32Field_; } /** * int32 int32Field = 4; + * @param value The int32Field to set. + * @return This builder for chaining. */ public Builder setInt32Field(int value) { @@ -3119,6 +3448,7 @@ public Builder setInt32Field(int value) { } /** * int32 int32Field = 4; + * @return This builder for chaining. */ public Builder clearInt32Field() { @@ -3130,12 +3460,16 @@ public Builder clearInt32Field() { private long int64Field_ ; /** * int64 int64Field = 5; + * @return The int64Field. */ + @java.lang.Override public long getInt64Field() { return int64Field_; } /** * int64 int64Field = 5; + * @param value The int64Field to set. + * @return This builder for chaining. */ public Builder setInt64Field(long value) { @@ -3145,6 +3479,7 @@ public Builder setInt64Field(long value) { } /** * int64 int64Field = 5; + * @return This builder for chaining. */ public Builder clearInt64Field() { @@ -3156,12 +3491,16 @@ public Builder clearInt64Field() { private int uint32Field_ ; /** * uint32 uint32Field = 6; + * @return The uint32Field. */ + @java.lang.Override public int getUint32Field() { return uint32Field_; } /** * uint32 uint32Field = 6; + * @param value The uint32Field to set. + * @return This builder for chaining. */ public Builder setUint32Field(int value) { @@ -3171,6 +3510,7 @@ public Builder setUint32Field(int value) { } /** * uint32 uint32Field = 6; + * @return This builder for chaining. 
*/ public Builder clearUint32Field() { @@ -3182,12 +3522,16 @@ public Builder clearUint32Field() { private long uint64Field_ ; /** * uint64 uint64Field = 7; + * @return The uint64Field. */ + @java.lang.Override public long getUint64Field() { return uint64Field_; } /** * uint64 uint64Field = 7; + * @param value The uint64Field to set. + * @return This builder for chaining. */ public Builder setUint64Field(long value) { @@ -3197,6 +3541,7 @@ public Builder setUint64Field(long value) { } /** * uint64 uint64Field = 7; + * @return This builder for chaining. */ public Builder clearUint64Field() { @@ -3208,12 +3553,16 @@ public Builder clearUint64Field() { private int sint32Field_ ; /** * sint32 sint32Field = 8; + * @return The sint32Field. */ + @java.lang.Override public int getSint32Field() { return sint32Field_; } /** * sint32 sint32Field = 8; + * @param value The sint32Field to set. + * @return This builder for chaining. */ public Builder setSint32Field(int value) { @@ -3223,6 +3572,7 @@ public Builder setSint32Field(int value) { } /** * sint32 sint32Field = 8; + * @return This builder for chaining. */ public Builder clearSint32Field() { @@ -3234,12 +3584,16 @@ public Builder clearSint32Field() { private long sint64Field_ ; /** * sint64 sint64Field = 9; + * @return The sint64Field. */ + @java.lang.Override public long getSint64Field() { return sint64Field_; } /** * sint64 sint64Field = 9; + * @param value The sint64Field to set. + * @return This builder for chaining. */ public Builder setSint64Field(long value) { @@ -3249,6 +3603,7 @@ public Builder setSint64Field(long value) { } /** * sint64 sint64Field = 9; + * @return This builder for chaining. */ public Builder clearSint64Field() { @@ -3260,12 +3615,16 @@ public Builder clearSint64Field() { private int fixed32Field_ ; /** * fixed32 fixed32Field = 10; + * @return The fixed32Field. 
*/ + @java.lang.Override public int getFixed32Field() { return fixed32Field_; } /** * fixed32 fixed32Field = 10; + * @param value The fixed32Field to set. + * @return This builder for chaining. */ public Builder setFixed32Field(int value) { @@ -3275,6 +3634,7 @@ public Builder setFixed32Field(int value) { } /** * fixed32 fixed32Field = 10; + * @return This builder for chaining. */ public Builder clearFixed32Field() { @@ -3286,12 +3646,16 @@ public Builder clearFixed32Field() { private long fixed64Field_ ; /** * fixed64 fixed64Field = 11; + * @return The fixed64Field. */ + @java.lang.Override public long getFixed64Field() { return fixed64Field_; } /** * fixed64 fixed64Field = 11; + * @param value The fixed64Field to set. + * @return This builder for chaining. */ public Builder setFixed64Field(long value) { @@ -3301,6 +3665,7 @@ public Builder setFixed64Field(long value) { } /** * fixed64 fixed64Field = 11; + * @return This builder for chaining. */ public Builder clearFixed64Field() { @@ -3312,12 +3677,16 @@ public Builder clearFixed64Field() { private int sfixed32Field_ ; /** * sfixed32 sfixed32Field = 12; + * @return The sfixed32Field. */ + @java.lang.Override public int getSfixed32Field() { return sfixed32Field_; } /** * sfixed32 sfixed32Field = 12; + * @param value The sfixed32Field to set. + * @return This builder for chaining. */ public Builder setSfixed32Field(int value) { @@ -3327,6 +3696,7 @@ public Builder setSfixed32Field(int value) { } /** * sfixed32 sfixed32Field = 12; + * @return This builder for chaining. */ public Builder clearSfixed32Field() { @@ -3338,12 +3708,16 @@ public Builder clearSfixed32Field() { private long sfixed64Field_ ; /** * sfixed64 sfixed64Field = 13; + * @return The sfixed64Field. */ + @java.lang.Override public long getSfixed64Field() { return sfixed64Field_; } /** * sfixed64 sfixed64Field = 13; + * @param value The sfixed64Field to set. + * @return This builder for chaining. 
*/ public Builder setSfixed64Field(long value) { @@ -3353,6 +3727,7 @@ public Builder setSfixed64Field(long value) { } /** * sfixed64 sfixed64Field = 13; + * @return This builder for chaining. */ public Builder clearSfixed64Field() { @@ -3364,12 +3739,16 @@ public Builder clearSfixed64Field() { private boolean boolField_ ; /** * bool boolField = 14; + * @return The boolField. */ + @java.lang.Override public boolean getBoolField() { return boolField_; } /** * bool boolField = 14; + * @param value The boolField to set. + * @return This builder for chaining. */ public Builder setBoolField(boolean value) { @@ -3379,6 +3758,7 @@ public Builder setBoolField(boolean value) { } /** * bool boolField = 14; + * @return This builder for chaining. */ public Builder clearBoolField() { @@ -3390,12 +3770,16 @@ public Builder clearBoolField() { private com.google.protobuf.ByteString bytesField_ = com.google.protobuf.ByteString.EMPTY; /** * bytes bytesField = 15; + * @return The bytesField. */ + @java.lang.Override public com.google.protobuf.ByteString getBytesField() { return bytesField_; } /** * bytes bytesField = 15; + * @param value The bytesField to set. + * @return This builder for chaining. */ public Builder setBytesField(com.google.protobuf.ByteString value) { if (value == null) { @@ -3408,6 +3792,7 @@ public Builder setBytesField(com.google.protobuf.ByteString value) { } /** * bytes bytesField = 15; + * @return This builder for chaining. */ public Builder clearBytesField() { @@ -3419,27 +3804,36 @@ public Builder clearBytesField() { private int testEnum_ = 0; /** * .proto.TestEnum testEnum = 16; + * @return The enum numeric value on the wire for testEnum. */ - public int getTestEnumValue() { + @java.lang.Override public int getTestEnumValue() { return testEnum_; } /** * .proto.TestEnum testEnum = 16; + * @param value The enum numeric value on the wire for testEnum to set. + * @return This builder for chaining. 
*/ public Builder setTestEnumValue(int value) { + testEnum_ = value; onChanged(); return this; } /** * .proto.TestEnum testEnum = 16; + * @return The testEnum. */ + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestEnum getTestEnum() { + @SuppressWarnings("deprecation") org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestEnum result = org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestEnum.valueOf(testEnum_); return result == null ? org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestEnum.UNRECOGNIZED : result; } /** * .proto.TestEnum testEnum = 16; + * @param value The testEnum to set. + * @return This builder for chaining. */ public Builder setTestEnum(org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestEnum value) { if (value == null) { @@ -3452,6 +3846,7 @@ public Builder setTestEnum(org.apache.pulsar.sql.presto.decoder.protobufnative.T } /** * .proto.TestEnum testEnum = 16; + * @return This builder for chaining. */ public Builder clearTestEnum() { @@ -3460,17 +3855,19 @@ public Builder clearTestEnum() { return this; } - private org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage subMessage_ = null; + private org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage subMessage_; private com.google.protobuf.SingleFieldBuilderV3< org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage, org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage.Builder, org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessageOrBuilder> subMessageBuilder_; /** * .proto.SubMessage subMessage = 17; + * @return Whether the subMessage field is set. */ public boolean hasSubMessage() { return subMessageBuilder_ != null || subMessage_ != null; } /** * .proto.SubMessage subMessage = 17; + * @return The subMessage. 
*/ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessage getSubMessage() { if (subMessageBuilder_ == null) { @@ -3579,13 +3976,14 @@ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.SubMessageOrB private com.google.protobuf.LazyStringList repeatedField_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureRepeatedFieldIsMutable() { - if (!((bitField0_ & 0x00020000) == 0x00020000)) { + if (!((bitField0_ & 0x00000001) != 0)) { repeatedField_ = new com.google.protobuf.LazyStringArrayList(repeatedField_); - bitField0_ |= 0x00020000; + bitField0_ |= 0x00000001; } } /** * repeated string repeatedField = 18; + * @return A list containing the repeatedField. */ public com.google.protobuf.ProtocolStringList getRepeatedFieldList() { @@ -3593,18 +3991,23 @@ private void ensureRepeatedFieldIsMutable() { } /** * repeated string repeatedField = 18; + * @return The count of repeatedField. */ public int getRepeatedFieldCount() { return repeatedField_.size(); } /** * repeated string repeatedField = 18; + * @param index The index of the element to return. + * @return The repeatedField at the given index. */ public java.lang.String getRepeatedField(int index) { return repeatedField_.get(index); } /** * repeated string repeatedField = 18; + * @param index The index of the value to return. + * @return The bytes of the repeatedField at the given index. */ public com.google.protobuf.ByteString getRepeatedFieldBytes(int index) { @@ -3612,6 +4015,9 @@ public java.lang.String getRepeatedField(int index) { } /** * repeated string repeatedField = 18; + * @param index The index to set the value at. + * @param value The repeatedField to set. + * @return This builder for chaining. */ public Builder setRepeatedField( int index, java.lang.String value) { @@ -3625,6 +4031,8 @@ public Builder setRepeatedField( } /** * repeated string repeatedField = 18; + * @param value The repeatedField to add. + * @return This builder for chaining. 
*/ public Builder addRepeatedField( java.lang.String value) { @@ -3638,6 +4046,8 @@ public Builder addRepeatedField( } /** * repeated string repeatedField = 18; + * @param values The repeatedField to add. + * @return This builder for chaining. */ public Builder addAllRepeatedField( java.lang.Iterable values) { @@ -3649,15 +4059,18 @@ public Builder addAllRepeatedField( } /** * repeated string repeatedField = 18; + * @return This builder for chaining. */ public Builder clearRepeatedField() { repeatedField_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00020000); + bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * repeated string repeatedField = 18; + * @param value The bytes of the repeatedField to add. + * @return This builder for chaining. */ public Builder addRepeatedFieldBytes( com.google.protobuf.ByteString value) { @@ -3701,6 +4114,7 @@ public int getMapFieldCount() { * map<string, double> mapField = 19; */ + @java.lang.Override public boolean containsMapField( java.lang.String key) { if (key == null) { throw new java.lang.NullPointerException(); } @@ -3709,6 +4123,7 @@ public boolean containsMapField( /** * Use {@link #getMapFieldMap()} instead. 
*/ + @java.lang.Override @java.lang.Deprecated public java.util.Map getMapField() { return getMapFieldMap(); @@ -3716,6 +4131,7 @@ public java.util.Map getMapField() { /** * map<string, double> mapField = 19; */ + @java.lang.Override public java.util.Map getMapFieldMap() { return internalGetMapField().getMap(); @@ -3723,6 +4139,7 @@ public java.util.Map getMapFieldMap() { /** * map<string, double> mapField = 19; */ + @java.lang.Override public double getMapFieldOrDefault( java.lang.String key, @@ -3735,6 +4152,7 @@ public double getMapFieldOrDefault( /** * map<string, double> mapField = 19; */ + @java.lang.Override public double getMapFieldOrThrow( java.lang.String key) { @@ -3793,11 +4211,132 @@ public Builder putAllMapField( .putAll(values); return this; } + + private com.google.protobuf.Timestamp timestampField_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> timestampFieldBuilder_; + /** + * .google.protobuf.Timestamp timestampField = 20; + * @return Whether the timestampField field is set. + */ + public boolean hasTimestampField() { + return timestampFieldBuilder_ != null || timestampField_ != null; + } + /** + * .google.protobuf.Timestamp timestampField = 20; + * @return The timestampField. + */ + public com.google.protobuf.Timestamp getTimestampField() { + if (timestampFieldBuilder_ == null) { + return timestampField_ == null ? 
com.google.protobuf.Timestamp.getDefaultInstance() : timestampField_; + } else { + return timestampFieldBuilder_.getMessage(); + } + } + /** + * .google.protobuf.Timestamp timestampField = 20; + */ + public Builder setTimestampField(com.google.protobuf.Timestamp value) { + if (timestampFieldBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + timestampField_ = value; + onChanged(); + } else { + timestampFieldBuilder_.setMessage(value); + } + + return this; + } + /** + * .google.protobuf.Timestamp timestampField = 20; + */ + public Builder setTimestampField( + com.google.protobuf.Timestamp.Builder builderForValue) { + if (timestampFieldBuilder_ == null) { + timestampField_ = builderForValue.build(); + onChanged(); + } else { + timestampFieldBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * .google.protobuf.Timestamp timestampField = 20; + */ + public Builder mergeTimestampField(com.google.protobuf.Timestamp value) { + if (timestampFieldBuilder_ == null) { + if (timestampField_ != null) { + timestampField_ = + com.google.protobuf.Timestamp.newBuilder(timestampField_).mergeFrom(value).buildPartial(); + } else { + timestampField_ = value; + } + onChanged(); + } else { + timestampFieldBuilder_.mergeFrom(value); + } + + return this; + } + /** + * .google.protobuf.Timestamp timestampField = 20; + */ + public Builder clearTimestampField() { + if (timestampFieldBuilder_ == null) { + timestampField_ = null; + onChanged(); + } else { + timestampField_ = null; + timestampFieldBuilder_ = null; + } + + return this; + } + /** + * .google.protobuf.Timestamp timestampField = 20; + */ + public com.google.protobuf.Timestamp.Builder getTimestampFieldBuilder() { + + onChanged(); + return getTimestampFieldFieldBuilder().getBuilder(); + } + /** + * .google.protobuf.Timestamp timestampField = 20; + */ + public com.google.protobuf.TimestampOrBuilder getTimestampFieldOrBuilder() { + if (timestampFieldBuilder_ != null) { + return 
timestampFieldBuilder_.getMessageOrBuilder(); + } else { + return timestampField_ == null ? + com.google.protobuf.Timestamp.getDefaultInstance() : timestampField_; + } + } + /** + * .google.protobuf.Timestamp timestampField = 20; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> + getTimestampFieldFieldBuilder() { + if (timestampFieldBuilder_ == null) { + timestampFieldBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder>( + getTimestampField(), + getParentForChildren(), + isClean()); + timestampField_ = null; + } + return timestampFieldBuilder_; + } + @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFieldsProto3(unknownFields); + return super.setUnknownFields(unknownFields); } + @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); @@ -3819,6 +4358,7 @@ public static org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMe private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override public TestMessage parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -3836,6 +4376,7 @@ public com.google.protobuf.Parser getParserForType() { return PARSER; } + @java.lang.Override public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage getDefaultInstanceForType() { return DEFAULT_INSTANCE; } @@ -3871,40 +4412,35 @@ public org.apache.pulsar.sql.presto.decoder.protobufnative.TestMsg.TestMessage g descriptor; static { java.lang.String[] descriptorData = { - 
"\n\rTestMsg.proto\022\005proto\"\214\001\n\nSubMessage\022\013\n" + - "\003foo\030\001 \001(\t\022\013\n\003bar\030\002 \001(\001\0226\n\rnestedMessage" + - "\030\003 \001(\0132\037.proto.SubMessage.NestedMessage\032" + - ",\n\rNestedMessage\022\r\n\005title\030\001 \001(\t\022\014\n\004urls\030" + - "\002 \003(\t\"\216\004\n\013TestMessage\022\023\n\013stringField\030\001 \001" + - "(\t\022\023\n\013doubleField\030\002 \001(\001\022\022\n\nfloatField\030\003 " + - "\001(\002\022\022\n\nint32Field\030\004 \001(\005\022\022\n\nint64Field\030\005 " + - "\001(\003\022\023\n\013uint32Field\030\006 \001(\r\022\023\n\013uint64Field\030" + - "\007 \001(\004\022\023\n\013sint32Field\030\010 \001(\021\022\023\n\013sint64Fiel" + - "d\030\t \001(\022\022\024\n\014fixed32Field\030\n \001(\007\022\024\n\014fixed64" + - "Field\030\013 \001(\006\022\025\n\rsfixed32Field\030\014 \001(\017\022\025\n\rsf" + - "ixed64Field\030\r \001(\020\022\021\n\tboolField\030\016 \001(\010\022\022\n\n" + - "bytesField\030\017 \001(\014\022!\n\010testEnum\030\020 \001(\0162\017.pro" + - "to.TestEnum\022%\n\nsubMessage\030\021 \001(\0132\021.proto." 
+ - "SubMessage\022\025\n\rrepeatedField\030\022 \003(\t\0222\n\010map" + - "Field\030\023 \003(\0132 .proto.TestMessage.MapField" + - "Entry\032/\n\rMapFieldEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005v" + - "alue\030\002 \001(\001:\0028\001*$\n\010TestEnum\022\n\n\006SHARED\020\000\022\014" + - "\n\010FAILOVER\020\001B>\n3org.apache.pulsar.sql.pr" + - "esto.decoder.protobufnativeB\007TestMsgb\006pr" + - "oto3" + "\n\rTestMsg.proto\022\005proto\032\037google/protobuf/" + + "timestamp.proto\"\214\001\n\nSubMessage\022\013\n\003foo\030\001 " + + "\001(\t\022\013\n\003bar\030\002 \001(\001\0226\n\rnestedMessage\030\003 \001(\0132" + + "\037.proto.SubMessage.NestedMessage\032,\n\rNest" + + "edMessage\022\r\n\005title\030\001 \001(\t\022\014\n\004urls\030\002 \003(\t\"\302" + + "\004\n\013TestMessage\022\023\n\013stringField\030\001 \001(\t\022\023\n\013d" + + "oubleField\030\002 \001(\001\022\022\n\nfloatField\030\003 \001(\002\022\022\n\n" + + "int32Field\030\004 \001(\005\022\022\n\nint64Field\030\005 \001(\003\022\023\n\013" + + "uint32Field\030\006 \001(\r\022\023\n\013uint64Field\030\007 \001(\004\022\023" + + "\n\013sint32Field\030\010 \001(\021\022\023\n\013sint64Field\030\t \001(\022" + + "\022\024\n\014fixed32Field\030\n \001(\007\022\024\n\014fixed64Field\030\013" + + " \001(\006\022\025\n\rsfixed32Field\030\014 \001(\017\022\025\n\rsfixed64F" + + "ield\030\r \001(\020\022\021\n\tboolField\030\016 \001(\010\022\022\n\nbytesFi" + + "eld\030\017 \001(\014\022!\n\010testEnum\030\020 \001(\0162\017.proto.Test" + + "Enum\022%\n\nsubMessage\030\021 \001(\0132\021.proto.SubMess" + + "age\022\025\n\rrepeatedField\030\022 \003(\t\0222\n\010mapField\030\023" + + " \003(\0132 .proto.TestMessage.MapFieldEntry\0222" + + "\n\016timestampField\030\024 \001(\0132\032.google.protobuf" + + ".Timestamp\032/\n\rMapFieldEntry\022\013\n\003key\030\001 \001(\t" + + "\022\r\n\005value\030\002 \001(\001:\0028\001*$\n\010TestEnum\022\n\n\006SHARE" + + 
"D\020\000\022\014\n\010FAILOVER\020\001B>\n3org.apache.pulsar.s" + + "ql.presto.decoder.protobufnativeB\007TestMs" + + "gP\000b\006proto3" }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - return null; - } - }; - com.google.protobuf.Descriptors.FileDescriptor + descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { - }, assigner); + com.google.protobuf.TimestampProto.getDescriptor(), + }); internal_static_proto_SubMessage_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_proto_SubMessage_fieldAccessorTable = new @@ -3922,13 +4458,14 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_proto_TestMessage_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_proto_TestMessage_descriptor, - new java.lang.String[] { "StringField", "DoubleField", "FloatField", "Int32Field", "Int64Field", "Uint32Field", "Uint64Field", "Sint32Field", "Sint64Field", "Fixed32Field", "Fixed64Field", "Sfixed32Field", "Sfixed64Field", "BoolField", "BytesField", "TestEnum", "SubMessage", "RepeatedField", "MapField", }); + new java.lang.String[] { "StringField", "DoubleField", "FloatField", "Int32Field", "Int64Field", "Uint32Field", "Uint64Field", "Sint32Field", "Sint64Field", "Fixed32Field", "Fixed64Field", "Sfixed32Field", "Sfixed64Field", "BoolField", "BytesField", "TestEnum", "SubMessage", "RepeatedField", "MapField", "TimestampField", }); internal_static_proto_TestMessage_MapFieldEntry_descriptor = internal_static_proto_TestMessage_descriptor.getNestedTypes().get(0); 
internal_static_proto_TestMessage_MapFieldEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_proto_TestMessage_MapFieldEntry_descriptor, new java.lang.String[] { "Key", "Value", }); + com.google.protobuf.TimestampProto.getDescriptor(); } // @@protoc_insertion_point(outer_class_scope) diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/protobufnative/TestMsg.proto b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/protobufnative/TestMsg.proto index 0a0f48917ea88..fd522bdd62652 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/protobufnative/TestMsg.proto +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/protobufnative/TestMsg.proto @@ -19,6 +19,7 @@ syntax = "proto3"; package proto; +import public "google/protobuf/timestamp.proto"; option java_package = "org.apache.pulsar.sql.presto.decoder.protobufnative"; option java_outer_classname = "TestMsg"; @@ -58,4 +59,5 @@ message TestMessage { SubMessage subMessage = 17; repeated string repeatedField = 18; map mapField = 19; + google.protobuf.Timestamp timestampField = 20; } \ No newline at end of file diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/protobufnative/TestProtobufNativeDecoder.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/protobufnative/TestProtobufNativeDecoder.java index fc1834cae887f..c4c6cb2e6f31d 100644 --- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/protobufnative/TestProtobufNativeDecoder.java +++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/protobufnative/TestProtobufNativeDecoder.java @@ -20,6 +20,7 @@ import com.google.common.collect.ImmutableList; import com.google.protobuf.ByteString; +import com.google.protobuf.Timestamp; import io.netty.buffer.ByteBuf; import 
io.prestosql.decoder.DecoderColumnHandle; import io.prestosql.decoder.FieldValueProvider; @@ -43,6 +44,7 @@ import static io.prestosql.spi.type.BooleanType.BOOLEAN; import static io.prestosql.spi.type.DoubleType.DOUBLE; import static io.prestosql.spi.type.IntegerType.INTEGER; +import static io.prestosql.spi.type.TimestampType.TIMESTAMP; import static io.prestosql.spi.type.VarbinaryType.VARBINARY; import static io.prestosql.spi.type.VarcharType.VARCHAR; import static org.apache.pulsar.sql.presto.TestPulsarConnector.getPulsarConnectorId; @@ -65,6 +67,12 @@ public void init() { @Test public void testPrimitiveType() { + //Time: 2921-1-1 + long mills = 30010669261001L; + Timestamp timestamp = Timestamp.newBuilder() + .setSeconds(mills / 1000) + .setNanos((int) (mills % 1000) * 1000000) + .build(); TestMsg.TestMessage testMessage = TestMsg.TestMessage.newBuilder() .setStringField("aaa") @@ -83,6 +91,7 @@ public void testPrimitiveType() { .setBoolField(true) .setBytesField(ByteString.copyFrom("abc".getBytes())) .setTestEnum(TestMsg.TestEnum.FAILOVER) + .setTimestampField(timestamp) .build(); ByteBuf payload = io.netty.buffer.Unpooled @@ -163,6 +172,11 @@ public void testPrimitiveType() { PulsarColumnHandle.HandleKeyValueType.NONE); checkValue(decodedRow, enumFieldColumnHandle, testMessage.getTestEnum().name()); + PulsarColumnHandle timestampFieldColumnHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(), + "timestampField", TIMESTAMP,false,false,"timestampField",null,null, + PulsarColumnHandle.HandleKeyValueType.NONE); + checkValue(decodedRow, timestampFieldColumnHandle, mills); + } @Test diff --git a/pulsar-testclient/pom.xml b/pulsar-testclient/pom.xml index 18f80a04f0be3..a2aea122c563c 100644 --- a/pulsar-testclient/pom.xml +++ b/pulsar-testclient/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/CmdGenerateDocumentation.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/CmdGenerateDocumentation.java index c316fc4c204c5..29c38aca7944d 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/CmdGenerateDocumentation.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/CmdGenerateDocumentation.java @@ -64,6 +64,7 @@ public static void main(String[] args) throws Exception { Map> cmdClassMap = new LinkedHashMap<>(); cmdClassMap.put("produce", Class.forName("org.apache.pulsar.testclient.PerformanceProducer$Arguments")); cmdClassMap.put("consume", Class.forName("org.apache.pulsar.testclient.PerformanceConsumer$Arguments")); + cmdClassMap.put("transaction", Class.forName("org.apache.pulsar.testclient.PerformanceTransaction$Arguments")); cmdClassMap.put("read", Class.forName("org.apache.pulsar.testclient.PerformanceReader$Arguments")); cmdClassMap.put("monitor-brokers", Class.forName("org.apache.pulsar.testclient.BrokerMonitor$Arguments")); cmdClassMap.put("simulation-client", Class.forName("org.apache.pulsar.testclient.LoadSimulationClient$MainArguments")); diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/DefaultMessageFormatter.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/DefaultMessageFormatter.java index e619a00027946..bf183a6d24b1d 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/DefaultMessageFormatter.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/DefaultMessageFormatter.java @@ -50,7 +50,7 @@ public byte[] formatMessage(String producerName, long msgId, byte[] message) { break; } if (i != 1) { - size = Float.valueOf(new String(sMessage.substring(idx+1,idx+i))); + size = Float.parseFloat(sMessage.substring(idx + 1, idx + i)); } String sub = sMessage.substring(idx, idx+i+1); diff --git 
a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceConsumer.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceConsumer.java index 52ba73e6781b8..77bef8c52b380 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceConsumer.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceConsumer.java @@ -26,16 +26,22 @@ import com.beust.jcommander.ParameterException; import com.beust.jcommander.Parameters; import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.PrintStream; import java.nio.ByteBuffer; import java.text.DecimalFormat; import java.util.Collections; import java.util.List; import java.util.Properties; import java.util.concurrent.Future; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.LongAdder; import org.HdrHistogram.Histogram; +import org.HdrHistogram.HistogramLogWriter; import org.HdrHistogram.Recorder; import org.apache.pulsar.client.api.ClientBuilder; import org.apache.pulsar.client.api.Consumer; @@ -45,6 +51,7 @@ import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.api.transaction.Transaction; import org.apache.pulsar.common.naming.TopicName; import org.apache.pulsar.testclient.utils.PaddingDecimalFormat; import org.slf4j.Logger; @@ -64,8 +71,19 @@ public class PerformanceConsumer { private static final LongAdder totalMessagesReceived = new LongAdder(); private static final LongAdder totalBytesReceived = new LongAdder(); - private static Recorder recorder = new Recorder(TimeUnit.DAYS.toMillis(10), 5); - private static Recorder cumulativeRecorder = new Recorder(TimeUnit.DAYS.toMillis(10), 5); + private static final 
LongAdder totalNumTxnOpenFail = new LongAdder(); + private static final LongAdder totalNumTxnOpenSuccess = new LongAdder(); + + private static final LongAdder totalMessageAck = new LongAdder(); + private static final LongAdder totalMessageAckFailed = new LongAdder(); + private static final LongAdder messageAck = new LongAdder(); + + private static final LongAdder totalEndTxnOpFailNum = new LongAdder(); + private static final LongAdder totalEndTxnOpSuccessNum = new LongAdder(); + private static final LongAdder numTxnOpSuccess = new LongAdder(); + + private static final Recorder recorder = new Recorder(TimeUnit.DAYS.toMillis(10), 5); + private static final Recorder cumulativeRecorder = new Recorder(TimeUnit.DAYS.toMillis(10), 5); @Parameters(commandDescription = "Test pulsar consumer performance.") static class Arguments { @@ -91,7 +109,7 @@ static class Arguments { @Parameter(names = { "-s", "--subscriber-name" }, description = "Subscriber name prefix", hidden = true) public String subscriberName; - @Parameter(names = { "-ss", "--subscriptions" }, description = "A list of subscriptions to consume on (e.g. sub1,sub2)") + @Parameter(names = { "-ss", "--subscriptions" }, description = "A list of subscriptions to consume (for example, sub1,sub2)") public List subscriptions = Collections.singletonList("sub"); @Parameter(names = { "-st", "--subscription-type" }, description = "Subscription type") @@ -174,9 +192,13 @@ static class Arguments { public long testTime = 0; @Parameter(names = {"-ioThreads", "--num-io-threads"}, description = "Set the number of threads to be " + - "used for handling connections to brokers, default is 1 thread") + "used for handling connections to brokers. 
The default value is 1.") public int ioThreads = 1; + @Parameter(names = {"-lt", "--num-listener-threads"}, description = "Set the number of threads" + + " to be used for message listeners") + public int listenerThreads = 1; + @Parameter(names = {"--batch-index-ack" }, description = "Enable or disable the batch index acknowledgment") public boolean batchIndexAck = false; @@ -185,6 +207,29 @@ static class Arguments { @Parameter(names = {"-bw", "--busy-wait"}, description = "Enable Busy-Wait on the Pulsar client") public boolean enableBusyWait = false; + + @Parameter(names = {"-tto", "--txn-timeout"}, description = "Set the time value of transaction timeout," + + " and the time unit is second. (After --txn-enable setting to true, --txn-timeout takes effect)") + public long transactionTimeout = 10; + + @Parameter(names = {"-nmt", "--numMessage-perTransaction"}, + description = "The number of messages acknowledged by a transaction. " + + "(After --txn-enable setting to true, -numMessage-perTransaction takes effect") + public int numMessagesPerTransaction = 50; + + @Parameter(names = {"-txn", "--txn-enable"}, description = "Enable or disable the transaction") + public boolean isEnableTransaction = false; + + @Parameter(names = {"-ntxn"}, description = "The number of opened transactions, 0 means keeping open." + + "(After --txn-enable setting to true, -ntxn takes effect.)") + public long totalNumTxn = 0; + + @Parameter(names = {"-abort"}, description = "Abort the transaction. (After --txn-enable " + + "setting to true, -abort takes effect)") + public boolean isAbortTransaction = false; + + @Parameter(names = { "--histogram-file" }, description = "HdrHistogram output file") + public String histogramFile = null; } public static void main(String[] args) throws Exception { @@ -293,47 +338,14 @@ public static void main(String[] args) throws Exception { final RateLimiter limiter = arguments.rate > 0 ? 
RateLimiter.create(arguments.rate) : null; long startTime = System.nanoTime(); long testEndTime = startTime + (long) (arguments.testTime * 1e9); - MessageListener listener = (consumer, msg) -> { - if (arguments.testTime > 0) { - if (System.nanoTime() > testEndTime) { - log.info("------------- DONE (reached the maximum duration: [{} seconds] of consumption) --------------", arguments.testTime); - printAggregatedStats(); - PerfClientUtils.exit(0); - } - } - if (arguments.numMessages > 0 && totalMessagesReceived.sum() >= arguments.numMessages) { - log.info("------------- DONE (reached the maximum number: [{}] of consumption) --------------", arguments.numMessages); - printAggregatedStats(); - PerfClientUtils.exit(0); - } - messagesReceived.increment(); - bytesReceived.add(msg.size()); - - totalMessagesReceived.increment(); - totalBytesReceived.add(msg.size()); - - if (limiter != null) { - limiter.acquire(); - } - - long latencyMillis = System.currentTimeMillis() - msg.getPublishTime(); - if (latencyMillis >= 0) { - recorder.recordValue(latencyMillis); - cumulativeRecorder.recordValue(latencyMillis); - } - - consumer.acknowledgeAsync(msg); - - if(arguments.poolMessages) { - msg.release(); - } - }; ClientBuilder clientBuilder = PulsarClient.builder() // + .enableTransaction(arguments.isEnableTransaction) .serviceUrl(arguments.serviceURL) // .connectionsPerBroker(arguments.maxConnections) // .statsInterval(arguments.statsIntervalSeconds, TimeUnit.SECONDS) // .ioThreads(arguments.ioThreads) // + .listenerThreads(arguments.listenerThreads) .enableBusyWait(arguments.enableBusyWait) .tlsTrustCertsFilePath(arguments.tlsTrustCertsFilePath); if (isNotBlank(arguments.authPluginClassName)) { @@ -347,9 +359,131 @@ public static void main(String[] args) throws Exception { if (isNotBlank(arguments.listenerName)) { clientBuilder.listenerName(arguments.listenerName); } - PulsarClient pulsarClient = clientBuilder.build(); + AtomicReference atomicReference; + if 
(arguments.isEnableTransaction) { + atomicReference = new AtomicReference<>(pulsarClient.newTransaction() + .withTransactionTimeout(arguments.transactionTimeout, TimeUnit.SECONDS).build().get()); + } else { + atomicReference = new AtomicReference<>(null); + } + + AtomicLong messageAckedCount = new AtomicLong(); + Semaphore messageReceiveLimiter = new Semaphore(arguments.numMessagesPerTransaction); + Thread thread = Thread.currentThread(); + MessageListener listener = (consumer, msg) -> { + if (arguments.testTime > 0) { + if (System.nanoTime() > testEndTime) { + log.info("------------------- DONE -----------------------"); + printAggregatedStats(); + PerfClientUtils.exit(0); + thread.interrupt(); + } + } + if (arguments.totalNumTxn > 0) { + if (totalEndTxnOpFailNum.sum() + totalEndTxnOpSuccessNum.sum() >= arguments.totalNumTxn) { + log.info("------------------- DONE -----------------------"); + printAggregatedStats(); + PerfClientUtils.exit(0); + thread.interrupt(); + } + } + messagesReceived.increment(); + bytesReceived.add(msg.size()); + + totalMessagesReceived.increment(); + totalBytesReceived.add(msg.size()); + + if (limiter != null) { + limiter.acquire(); + } + + long latencyMillis = System.currentTimeMillis() - msg.getPublishTime(); + if (latencyMillis >= 0) { + recorder.recordValue(latencyMillis); + cumulativeRecorder.recordValue(latencyMillis); + } + if (arguments.isEnableTransaction) { + try { + messageReceiveLimiter.acquire(); + }catch (InterruptedException e){ + log.error("Got error: ", e); + } + consumer.acknowledgeAsync(msg.getMessageId(), atomicReference.get()).thenRun(() -> { + totalMessageAck.increment(); + messageAck.increment(); + }).exceptionally(throwable ->{ + log.error("Ack message {} failed with exception", msg, throwable); + totalMessageAckFailed.increment(); + return null; + }); + } else { + consumer.acknowledgeAsync(msg).thenRun(()->{ + totalMessageAck.increment(); + messageAck.increment(); + } + ).exceptionally(throwable ->{ + 
log.error("Ack message {} failed with exception", msg, throwable); + totalMessageAckFailed.increment(); + return null; + } + ); + } + if (arguments.poolMessages) { + msg.release(); + } + if (arguments.isEnableTransaction + && messageAckedCount.incrementAndGet() == arguments.numMessagesPerTransaction) { + Transaction transaction = atomicReference.get(); + if (!arguments.isAbortTransaction) { + transaction.commit() + .thenRun(() -> { + if (log.isDebugEnabled()) { + log.debug("Commit transaction {}", transaction.getTxnID()); + } + totalEndTxnOpSuccessNum.increment(); + numTxnOpSuccess.increment(); + }) + .exceptionally(exception -> { + log.error("Commit transaction failed with exception : ", exception); + totalEndTxnOpFailNum.increment(); + return null; + }); + } else { + transaction.abort().thenRun(() -> { + if (log.isDebugEnabled()) { + log.debug("Abort transaction {}", transaction.getTxnID()); + } + totalEndTxnOpSuccessNum.increment(); + numTxnOpSuccess.increment(); + }).exceptionally(exception -> { + log.error("Abort transaction {} failed with exception", + transaction.getTxnID().toString(), + exception); + totalEndTxnOpFailNum.increment(); + return null; + }); + } + while (true) { + try { + Transaction newTransaction = pulsarClient.newTransaction() + .withTransactionTimeout(arguments.transactionTimeout, TimeUnit.SECONDS) + .build().get(); + atomicReference.compareAndSet(transaction, newTransaction); + totalNumTxnOpenSuccess.increment(); + messageAckedCount.set(0); + messageReceiveLimiter.release(arguments.numMessagesPerTransaction); + break; + } catch (Exception e) { + log.error("Failed to new transaction with exception:", e); + totalNumTxnOpenFail.increment(); + } + } + } + + }; + List>> futures = Lists.newArrayList(); ConsumerBuilder consumerBuilder = pulsarClient.newConsumer(Schema.BYTEBUFFER) // .messageListener(listener) // @@ -387,18 +521,16 @@ public static void main(String[] args) throws Exception { } } } - for (Future> future : futures) { future.get(); } 
- log.info("Start receiving from {} consumers per subscription on {} topics", arguments.numConsumers, arguments.numTopics); long start = System.nanoTime(); Runtime.getRuntime().addShutdownHook(new Thread(() -> { - printAggregatedThroughput(start); + printAggregatedThroughput(start, arguments); printAggregatedStats(); })); @@ -406,7 +538,19 @@ public static void main(String[] args) throws Exception { long oldTime = System.nanoTime(); Histogram reportHistogram = null; + HistogramLogWriter histogramLogWriter = null; + + if (arguments.histogramFile != null) { + String statsFileName = arguments.histogramFile; + log.info("Dumping latency stats to {}", statsFileName); + + PrintStream histogramLog = new PrintStream(new FileOutputStream(statsFileName), false); + histogramLogWriter = new HistogramLogWriter(histogramLog); + // Some log header bits + histogramLogWriter.outputLogFormatVersion(); + histogramLogWriter.outputLegend(); + } while (true) { try { @@ -420,17 +564,37 @@ public static void main(String[] args) throws Exception { long total = totalMessagesReceived.sum(); double rate = messagesReceived.sumThenReset() / elapsed; double throughput = bytesReceived.sumThenReset() / elapsed * 8 / 1024 / 1024; - + double rateAck = messageAck.sumThenReset() / elapsed; + long totalTxnOpSuccessNum = 0; + long totalTxnOpFailNum = 0; + double rateOpenTxn = 0; reportHistogram = recorder.getIntervalHistogram(reportHistogram); + if (arguments.isEnableTransaction) { + totalTxnOpSuccessNum = totalEndTxnOpSuccessNum.sum(); + totalTxnOpFailNum = totalEndTxnOpFailNum.sum(); + rateOpenTxn = numTxnOpSuccess.sumThenReset() / elapsed; + log.info("--- Transaction: {} transaction end successfully --- {} transaction end failed " + + "--- {} Txn/s --- AckRate: {} msg/s", + totalTxnOpSuccessNum, + totalTxnOpFailNum, + dec.format(rateOpenTxn), + dec.format(rateAck)); + } log.info( - "Throughput received: {} msg --- {} msg/s -- {} Mbit/s --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 
99.9pct: {} - 99.99pct: {} - Max: {}", + "Throughput received: {} msg --- {} msg/s --- {} Mbit/s " + + "--- Latency: mean: {} ms - med: {} " + + "- 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}", intFormat.format(total), dec.format(rate), dec.format(throughput), dec.format(reportHistogram.getMean()), reportHistogram.getValueAtPercentile(50), reportHistogram.getValueAtPercentile(95), reportHistogram.getValueAtPercentile(99), reportHistogram.getValueAtPercentile(99.9), reportHistogram.getValueAtPercentile(99.99), reportHistogram.getMaxValue()); + if (histogramLogWriter != null) { + histogramLogWriter.outputIntervalHistogram(reportHistogram); + } + reportHistogram.reset(); oldTime = now; } @@ -438,15 +602,41 @@ public static void main(String[] args) throws Exception { pulsarClient.close(); } - private static void printAggregatedThroughput(long start) { + private static void printAggregatedThroughput(long start, Arguments arguments) { double elapsed = (System.nanoTime() - start) / 1e9; double rate = totalMessagesReceived.sum() / elapsed; double throughput = totalBytesReceived.sum() / elapsed * 8 / 1024 / 1024; + long totalEndTxnSuccess = 0; + long totalEndTxnFail = 0; + long numTransactionOpenFailed = 0; + long numTransactionOpenSuccess = 0; + long totalnumMessageAckFailed = 0; + double rateAck = totalMessageAck.sum() / elapsed; + double rateOpenTxn = 0; + if (arguments.isEnableTransaction) { + totalEndTxnSuccess = totalEndTxnOpSuccessNum.sum(); + totalEndTxnFail = totalEndTxnOpFailNum.sum(); + rateOpenTxn = (totalEndTxnSuccess + totalEndTxnFail) / elapsed; + totalnumMessageAckFailed = totalMessageAckFailed.sum(); + numTransactionOpenFailed = totalNumTxnOpenFail.sum(); + numTransactionOpenSuccess = totalNumTxnOpenSuccess.sum(); + log.info("-- Transaction: {} transaction end successfully --- {} transaction end failed " + + "--- {} transaction open successfully --- {} transaction open failed " + + "--- {} Txn/s ", + totalEndTxnSuccess, + totalEndTxnFail, + 
numTransactionOpenSuccess, + numTransactionOpenFailed, + dec.format(rateOpenTxn)); + } log.info( - "Aggregated throughput stats --- {} records received --- {} msg/s --- {} Mbit/s", - totalMessagesReceived, + "Aggregated throughput stats --- {} records received --- {} msg/s --- {} Mbit/s" + + " --- AckRate: {} msg/s --- ack failed {} msg", + totalMessagesReceived.sum(), dec.format(rate), - dec.format(throughput)); + dec.format(throughput), + rateAck, + totalnumMessageAckFailed); } private static void printAggregatedStats() { diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java index 44b3c20d67d9e..c18cb6cfe3242 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java @@ -49,7 +49,11 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.LongAdder; import org.HdrHistogram.Histogram; @@ -71,6 +75,7 @@ import static org.apache.pulsar.client.impl.conf.ProducerConfigurationData.DEFAULT_MAX_PENDING_MESSAGES_ACROSS_PARTITIONS; import static org.apache.pulsar.client.impl.conf.ProducerConfigurationData.DEFAULT_BATCHING_MAX_MESSAGES; +import org.apache.pulsar.client.api.transaction.Transaction; import org.apache.pulsar.common.partition.PartitionedTopicMetadata; import org.apache.pulsar.testclient.utils.PaddingDecimalFormat; import org.slf4j.Logger; @@ -88,11 +93,18 @@ public class PerformanceProducer { private static final LongAdder messagesFailed = new LongAdder(); private static final 
LongAdder bytesSent = new LongAdder(); + private static final LongAdder totalNumTxnOpenTxnFail = new LongAdder(); + private static final LongAdder totalNumTxnOpenTxnSuccess = new LongAdder(); + private static final LongAdder totalMessagesSent = new LongAdder(); private static final LongAdder totalBytesSent = new LongAdder(); - private static Recorder recorder = new Recorder(TimeUnit.SECONDS.toMicros(120000), 5); - private static Recorder cumulativeRecorder = new Recorder(TimeUnit.SECONDS.toMicros(120000), 5); + private static final Recorder recorder = new Recorder(TimeUnit.SECONDS.toMicros(120000), 5); + private static final Recorder cumulativeRecorder = new Recorder(TimeUnit.SECONDS.toMicros(120000), 5); + + private static final LongAdder totalEndTxnOpSuccessNum = new LongAdder(); + private static final LongAdder totalEndTxnOpFailNum = new LongAdder(); + private static final LongAdder numTxnOpSuccess = new LongAdder(); private static IMessageFormatter messageFormatter = null; @@ -238,7 +250,7 @@ static class Arguments { public String messageKeyGenerationMode = null; @Parameter(names = {"-ioThreads", "--num-io-threads"}, description = "Set the number of threads to be " + - "used for handling connections to brokers, default is 1 thread") + "used for handling connections to brokers. The default value is 1.") public int ioThreads = 1; @Parameter(names = {"-bw", "--busy-wait"}, description = "Enable Busy-Wait on the Pulsar client") @@ -253,6 +265,25 @@ static class Arguments { @Parameter(names = {"-fc", "--format-class"}, description="Custom Formatter class name") public String formatterClass = "org.apache.pulsar.testclient.DefaultMessageFormatter"; + + @Parameter(names = {"-tto", "--txn-timeout"}, description = "Set the time value of transaction timeout," + + " and the time unit is second. 
(After --txn-enable setting to true, --txn-timeout takes effect)") + public long transactionTimeout = 10; + + @Parameter(names = {"-nmt", "--numMessage-perTransaction"}, + description = "The number of messages sent by a transaction. " + + "(After --txn-enable setting to true, -nmt takes effect)") + public int numMessagesPerTransaction = 50; + + @Parameter(names = {"-txn", "--txn-enable"}, description = "Enable or disable the transaction") + public boolean isEnableTransaction = false; + + @Parameter(names = {"-abort"}, description = "Abort the transaction. (After --txn-enable " + + "setting to true, -abort takes effect)") + public boolean isAbortTransaction = false; + + @Parameter(names = { "--histogram-file" }, description = "HdrHistogram output file") + public String histogramFile = null; } public static void main(String[] args) throws Exception { @@ -373,7 +404,7 @@ public static void main(String[] args) throws Exception { long start = System.nanoTime(); Runtime.getRuntime().addShutdownHook(new Thread(() -> { - printAggregatedThroughput(start); + printAggregatedThroughput(start, arguments); printAggregatedStats(); })); @@ -390,7 +421,7 @@ public static void main(String[] args) throws Exception { clientBuilder.allowTlsInsecureConnection(arguments.tlsAllowInsecureConnection); } - try (PulsarAdmin client = clientBuilder.build();) { + try (PulsarAdmin client = clientBuilder.build()) { for (String topic : arguments.topics) { log.info("Creating partitioned topic {} with {} partitions", topic, arguments.partitions); try { @@ -436,16 +467,19 @@ public static void main(String[] args) throws Exception { long oldTime = System.nanoTime(); Histogram reportHistogram = null; + HistogramLogWriter histogramLogWriter = null; - String statsFileName = "perf-producer-" + System.currentTimeMillis() + ".hgrm"; - log.info("Dumping latency stats to {}", statsFileName); + if (arguments.histogramFile != null) { + String statsFileName = arguments.histogramFile; + log.info("Dumping latency 
stats to {}", statsFileName); - PrintStream histogramLog = new PrintStream(new FileOutputStream(statsFileName), false); - HistogramLogWriter histogramLogWriter = new HistogramLogWriter(histogramLog); + PrintStream histogramLog = new PrintStream(new FileOutputStream(statsFileName), false); + histogramLogWriter = new HistogramLogWriter(histogramLog); - // Some log header bits - histogramLogWriter.outputLogFormatVersion(); - histogramLogWriter.outputLegend(); + // Some log header bits + histogramLogWriter.outputLogFormatVersion(); + histogramLogWriter.outputLegend(); + } while (true) { try { @@ -461,14 +495,27 @@ public static void main(String[] args) throws Exception { long now = System.nanoTime(); double elapsed = (now - oldTime) / 1e9; long total = totalMessagesSent.sum(); + long totalTxnOpSuccess = 0; + long totalTxnOpFail = 0; + double rateOpenTxn = 0; double rate = messagesSent.sumThenReset() / elapsed; double failureRate = messagesFailed.sumThenReset() / elapsed; double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8; reportHistogram = recorder.getIntervalHistogram(reportHistogram); + if (arguments.isEnableTransaction) { + totalTxnOpSuccess = totalEndTxnOpSuccessNum.sum(); + totalTxnOpFail = totalEndTxnOpFailNum.sum(); + rateOpenTxn = numTxnOpSuccess.sumThenReset() / elapsed; + log.info("--- Transaction : {} transaction end successfully --- {} transaction end failed " + + "--- {} Txn/s", + totalTxnOpSuccess, totalTxnOpFail, totalFormat.format(rateOpenTxn)); + } log.info( - "Throughput produced: {} msg --- {} msg/s --- {} Mbit/s --- failure {} msg/s --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}", + "Throughput produced: {} msg --- {} msg/s --- {} Mbit/s --- failure {} msg/s " + + "--- Latency: mean: " + + "{} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}", intFormat.format(total), throughputFormat.format(rate), throughputFormat.format(throughput), 
throughputFormat.format(failureRate), @@ -480,7 +527,10 @@ public static void main(String[] args) throws Exception { dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0), dec.format(reportHistogram.getMaxValue() / 1000.0)); - histogramLogWriter.outputIntervalHistogram(reportHistogram); + if (histogramLogWriter != null) { + histogramLogWriter.outputIntervalHistogram(reportHistogram); + } + reportHistogram.reset(); oldTime = now; @@ -511,6 +561,7 @@ private static void runProducer(int producerId, List>> futures = Lists.newArrayList(); ClientBuilder clientBuilder = PulsarClient.builder() // + .enableTransaction(arguments.isEnableTransaction)// .serviceUrl(arguments.serviceURL) // .connectionsPerBroker(arguments.maxConnections) // .ioThreads(arguments.ioThreads) // @@ -540,6 +591,16 @@ private static void runProducer(int producerId, // enable round robin message routing if it is a partitioned topic .messageRoutingMode(MessageRoutingMode.RoundRobinPartition); + AtomicReference transactionAtomicReference; + if (arguments.isEnableTransaction) { + producerBuilder.sendTimeout(0, TimeUnit.SECONDS); + transactionAtomicReference = new AtomicReference<>(client.newTransaction() + .withTransactionTimeout(arguments.transactionTimeout, TimeUnit.SECONDS) + .build() + .get()); + } else { + transactionAtomicReference = new AtomicReference<>(null); + } if (arguments.producerName != null) { String producerName = String.format("%s%s%d", arguments.producerName, arguments.separator, producerId); producerBuilder.producerName(producerName); @@ -604,6 +665,8 @@ private static void runProducer(int producerId, } // Send messages on all topics/producers long totalSent = 0; + AtomicLong numMessageSend = new AtomicLong(0); + Semaphore numMsgPerTxnLimit = new Semaphore(arguments.numMessagesPerTransaction); while (true) { for (Producer producer : producers) { if (arguments.testTime > 0) { @@ -626,7 +689,8 @@ private static void runProducer(int producerId, } } rateLimiter.acquire(); - + 
//if transaction is disable, transaction will be null. + Transaction transaction = transactionAtomicReference.get(); final long sendTime = System.nanoTime(); byte[] payloadData; @@ -641,10 +705,22 @@ private static void runProducer(int producerId, } else { payloadData = payloadBytes; } - - TypedMessageBuilder messageBuilder = producer.newMessage() - .value(payloadData); - if (arguments.delay >0) { + TypedMessageBuilder messageBuilder; + if (arguments.isEnableTransaction) { + if (arguments.numMessagesPerTransaction> 0) { + try{ + numMsgPerTxnLimit.acquire(); + }catch (InterruptedException exception){ + log.error("Get exception: ", exception); + } + } + messageBuilder = producer.newMessage(transaction) + .value(payloadData); + } else { + messageBuilder = producer.newMessage() + .value(payloadData); + } + if (arguments.delay > 0) { messageBuilder.deliverAfter(arguments.delay, TimeUnit.SECONDS); } //generate msg key @@ -653,9 +729,10 @@ private static void runProducer(int producerId, } else if (msgKeyMode == MessageKeyGenerationMode.autoIncrement) { messageBuilder.key(String.valueOf(totalSent)); } + PulsarClient pulsarClient = client; messageBuilder.sendAsync().thenRun(() -> { - messagesSent.increment(); bytesSent.add(payloadData.length); + messagesSent.increment(); totalMessagesSent.increment(); totalBytesSent.add(payloadData.length); @@ -672,13 +749,62 @@ private static void runProducer(int producerId, if (ex.getCause() instanceof ArrayIndexOutOfBoundsException) { return null; } - log.warn("Write error on message", ex); + log.warn("Write message error with exception", ex); messagesFailed.increment(); if (arguments.exitOnFailure) { PerfClientUtils.exit(-1); } return null; }); + if (arguments.isEnableTransaction + && numMessageSend.incrementAndGet() == arguments.numMessagesPerTransaction) { + if (!arguments.isAbortTransaction) { + transaction.commit() + .thenRun(() -> { + if (log.isDebugEnabled()) { + log.debug("Committed transaction {}", + 
transaction.getTxnID().toString()); + } + totalEndTxnOpSuccessNum.increment(); + numTxnOpSuccess.increment(); + }) + .exceptionally(exception -> { + log.error("Commit transaction failed with exception : ", + exception); + totalEndTxnOpFailNum.increment(); + return null; + }); + } else { + transaction.abort().thenRun(() -> { + if (log.isDebugEnabled()) { + log.debug("Abort transaction {}", transaction.getTxnID().toString()); + } + totalEndTxnOpSuccessNum.increment(); + numTxnOpSuccess.increment(); + }).exceptionally(exception -> { + log.error("Abort transaction {} failed with exception", + transaction.getTxnID().toString(), + exception); + totalEndTxnOpFailNum.increment(); + return null; + }); + } + while(true) { + try { + Transaction newTransaction = pulsarClient.newTransaction() + .withTransactionTimeout(arguments.transactionTimeout, + TimeUnit.SECONDS).build().get(); + transactionAtomicReference.compareAndSet(transaction, newTransaction); + numMessageSend.set(0); + numMsgPerTxnLimit.release(arguments.numMessagesPerTransaction); + totalNumTxnOpenTxnSuccess.increment(); + break; + }catch (Exception e){ + totalNumTxnOpenTxnFail.increment(); + log.error("Failed to new transaction with exception: ", e); + } + } + } } } } catch (Throwable t) { @@ -687,6 +813,7 @@ private static void runProducer(int producerId, if (null != client) { try { client.close(); + PerfClientUtils.exit(-1); } catch (PulsarClientException e) { log.error("Failed to close test client", e); } @@ -694,13 +821,34 @@ private static void runProducer(int producerId, } } - private static void printAggregatedThroughput(long start) { + private static void printAggregatedThroughput(long start, Arguments arguments) { double elapsed = (System.nanoTime() - start) / 1e9; double rate = totalMessagesSent.sum() / elapsed; double throughput = totalBytesSent.sum() / elapsed / 1024 / 1024 * 8; + long totalTxnSuccess = 0; + long totalTxnFail = 0; + double rateOpenTxn = 0; + long numTransactionOpenFailed = 0; + long 
numTransactionOpenSuccess = 0; + + if (arguments.isEnableTransaction) { + totalTxnSuccess = totalEndTxnOpSuccessNum.sum(); + totalTxnFail = totalEndTxnOpFailNum.sum(); + rateOpenTxn = elapsed / (totalTxnFail + totalTxnSuccess); + numTransactionOpenFailed = totalNumTxnOpenTxnFail.sum(); + numTransactionOpenSuccess = totalNumTxnOpenTxnSuccess.sum(); + log.info("--- Transaction : {} transaction end successfully --- {} transaction end failed " + + "--- {} transaction open successfully --- {} transaction open failed " + + "--- {} Txn/s", + totalTxnSuccess, + totalTxnFail, + numTransactionOpenSuccess, + numTransactionOpenFailed, + totalFormat.format(rateOpenTxn)); + } log.info( - "Aggregated throughput stats --- {} records sent --- {} msg/s --- {} Mbit/s", - totalMessagesSent, + "Aggregated throughput stats --- {} records sent --- {} msg/s --- {} Mbit/s ", + totalMessagesSent.sum(), totalFormat.format(rate), totalFormat.format(throughput)); } diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceReader.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceReader.java index a7f66ad36a3b4..d18c76a2f8b4f 100644 --- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceReader.java +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceReader.java @@ -134,6 +134,10 @@ static class Arguments { @Parameter(names = {"-ioThreads", "--num-io-threads"}, description = "Set the number of threads to be " + "used for handling connections to brokers, default is 1 thread") public int ioThreads = 1; + + @Parameter(names = {"-lt", "--num-listener-threads"}, description = "Set the number of threads" + + " to be used for message listeners") + public int listenerThreads = 1; } public static void main(String[] args) throws Exception { @@ -195,7 +199,7 @@ public static void main(String[] args) throws Exception { arguments.authParams = prop.getProperty("authParams", null); } - if (arguments.useTls 
== false) { + if (!arguments.useTls) { arguments.useTls = Boolean.parseBoolean(prop.getProperty("useTls")); } @@ -252,6 +256,7 @@ public static void main(String[] args) throws Exception { .connectionsPerBroker(arguments.maxConnections) // .statsInterval(arguments.statsIntervalSeconds, TimeUnit.SECONDS) // .ioThreads(arguments.ioThreads) // + .listenerThreads(arguments.listenerThreads) .enableTls(arguments.useTls) // .tlsTrustCertsFilePath(arguments.tlsTrustCertsFilePath); diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceTransaction.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceTransaction.java new file mode 100644 index 0000000000000..eee284b6bbf2f --- /dev/null +++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceTransaction.java @@ -0,0 +1,711 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.testclient; + +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +import com.beust.jcommander.JCommander; +import com.beust.jcommander.Parameter; +import com.beust.jcommander.ParameterException; +import com.beust.jcommander.Parameters; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.google.common.collect.Lists; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.text.DecimalFormat; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Properties; +import java.util.Random; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.LongAdder; +import org.HdrHistogram.Histogram; +import org.HdrHistogram.HistogramLogWriter; +import org.HdrHistogram.Recorder; +import org.apache.curator.shaded.com.google.common.util.concurrent.RateLimiter; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminBuilder; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.ConsumerBuilder; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.ProducerBuilder; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; +import 
org.apache.pulsar.client.api.SubscriptionInitialPosition; +import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.api.transaction.Transaction; +import org.apache.pulsar.common.partition.PartitionedTopicMetadata; +import org.apache.pulsar.testclient.utils.PaddingDecimalFormat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class PerformanceTransaction { + + + private static final LongAdder totalNumEndTxnOpFailed = new LongAdder(); + private static final LongAdder totalNumEndTxnOpSuccess = new LongAdder(); + private static final LongAdder numTxnOpSuccess = new LongAdder(); + private static final LongAdder totalNumTxnOpenTxnFail = new LongAdder(); + private static final LongAdder totalNumTxnOpenTxnSuccess = new LongAdder(); + + private static final LongAdder numMessagesAckFailed = new LongAdder(); + private static final LongAdder numMessagesAckSuccess = new LongAdder(); + private static final LongAdder numMessagesSendFailed = new LongAdder(); + private static final LongAdder numMessagesSendSuccess = new LongAdder(); + + private static final Recorder messageAckRecorder = + new Recorder(TimeUnit.SECONDS.toMicros(120000), 5); + private static final Recorder messageAckCumulativeRecorder = + new Recorder(TimeUnit.SECONDS.toMicros(120000), 5); + + private static final Recorder messageSendRecorder = + new Recorder(TimeUnit.SECONDS.toMicros(120000), 5); + private static final Recorder messageSendRCumulativeRecorder = + new Recorder(TimeUnit.SECONDS.toMicros(120000), 5); + + + @Parameters(commandDescription = "Test pulsar transaction performance.") + static class Arguments { + + @Parameter(names = {"-h", "--help"}, description = "Help message", help = true) + boolean help; + + @Parameter(names = {"--conf-file"}, description = "Configuration file") + public String confFile; + + @Parameter(names = "--topics-c", description = "All topics that need ack for a transaction", required = + true) + public List consumerTopic = 
Collections.singletonList("test-consume"); + + @Parameter(names = "--topics-p", description = "All topics that need produce for a transaction", + required = true) + public List producerTopic = Collections.singletonList("test-produce"); + + @Parameter(names = {"-threads", "--num-test-threads"}, description = "Number of test threads." + + "This thread is for a new transaction to ack messages from consumer topics and produce message to " + + "producer topics, and then commit or abort this transaction. " + + "Increasing the number of threads increases the parallelism of the performance test, " + + "thereby increasing the intensity of the stress test.") + public int numTestThreads = 1; + + @Parameter(names = {"-au", "--admin-url"}, description = "Pulsar Admin URL") + public String adminURL; + + @Parameter(names = {"-u", "--service-url"}, description = "Pulsar Service URL") + public String serviceURL; + + @Parameter(names = {"-np", + "--partitions"}, description = "Create partitioned topics with a given number of partitions, 0 means" + + "not trying to create a topic") + public Integer partitions = null; + + @Parameter(names = {"-c", + "--max-connections"}, description = "Max number of TCP connections to a single broker") + public int maxConnections = 100; + + @Parameter(names = {"-time", + "--test-duration"}, description = "Test duration (in second). 0 means keeping publishing") + public long testTime = 0; + + @Parameter(names = {"-ioThreads", "--num-io-threads"}, description = "Set the number of threads to be " + + "used for handling connections to brokers. 
The default value is 1.") + public int ioThreads = 1; + + @Parameter(names = {"-ss", + "--subscriptions"}, description = "A list of subscriptions to consume (for example, sub1,sub2)") + public List subscriptions = Collections.singletonList("sub"); + + @Parameter(names = {"-ns", "--num-subscriptions"}, description = "Number of subscriptions (per topic)") + public int numSubscriptions = 1; + + @Parameter(names = {"-sp", "--subscription-position"}, description = "Subscription position") + private SubscriptionInitialPosition subscriptionInitialPosition = SubscriptionInitialPosition.Earliest; + + @Parameter(names = {"-st", "--subscription-type"}, description = "Subscription type") + public SubscriptionType subscriptionType = SubscriptionType.Shared; + + @Parameter(names = {"-q", "--receiver-queue-size"}, description = "Size of the receiver queue") + public int receiverQueueSize = 1000; + + @Parameter(names = {"-tto", "--txn-timeout"}, description = "Set the time value of transaction timeout," + + " and the time unit is second. (After --txn-enable setting to true, --txn-timeout takes effect)") + public long transactionTimeout = 5; + + @Parameter(names = {"-ntxn", + "--number-txn"}, description = "Set the number of transaction. 0 means keeping open." + + "If transaction disabled, it means the number of tasks. The task or transaction produces or " + + "consumes a specified number of messages.") + public long numTransactions = 0; + + @Parameter(names = {"-nmp", "--numMessage-perTransaction-produce"}, + description = "Set the number of messages produced in a transaction." + + "If transaction disabled, it means the number of messages produced in a task.") + public int numMessagesProducedPerTransaction = 1; + + @Parameter(names = {"-nmc", "--numMessage-perTransaction-consume"}, + description = "Set the number of messages consumed in a transaction." 
+ + "If transaction disabled, it means the number of messages consumed in a task.") + public int numMessagesReceivedPerTransaction = 1; + + @Parameter(names = {"--txn-disable"}, description = "Disable transaction") + public boolean isDisableTransaction = false; + + @Parameter(names = {"-abort"}, description = "Abort the transaction. (After --txn-disEnable " + + "setting to false, -abort takes effect)") + public boolean isAbortTransaction = false; + + @Parameter(names = "-txnRate", description = "Set the rate of opened transaction or task. 0 means no limit") + public int openTxnRate = 0; + } + + public static void main(String[] args) + throws IOException, PulsarAdminException, ExecutionException, InterruptedException { + final Arguments arguments = new Arguments(); + JCommander jc = new JCommander(arguments); + jc.setProgramName("pulsar-perf transaction"); + + try { + jc.parse(args); + } catch (ParameterException e) { + System.out.println(e.getMessage()); + jc.usage(); + PerfClientUtils.exit(-1); + } + + if (arguments.help) { + jc.usage(); + PerfClientUtils.exit(-1); + } + + + if (arguments.confFile != null) { + Properties prop = new Properties(System.getProperties()); + prop.load(new FileInputStream(arguments.confFile)); + + if (arguments.serviceURL == null) { + arguments.serviceURL = prop.getProperty("brokerServiceUrl"); + } + + if (arguments.serviceURL == null) { + arguments.serviceURL = prop.getProperty("webServiceUrl"); + } + + // fallback to previous-version serviceUrl property to maintain backward-compatibility + if (arguments.serviceURL == null) { + arguments.serviceURL = prop.getProperty("serviceUrl", "http://localhost:8080/"); + } + + if (arguments.adminURL == null) { + arguments.adminURL = prop.getProperty("webServiceUrl"); + } + if (arguments.adminURL == null) { + arguments.adminURL = prop.getProperty("adminURL", "http://localhost:8080/"); + } + } + + + // Dump config variables + PerfClientUtils.printJVMInformation(log); + + ObjectMapper m = new 
ObjectMapper(); + ObjectWriter w = m.writerWithDefaultPrettyPrinter(); + log.info("Starting Pulsar perf transaction with config: {}", w.writeValueAsString(arguments)); + + final byte[] payloadBytes = new byte[1024]; + Random random = new Random(0); + for (int i = 0; i < payloadBytes.length; ++i) { + payloadBytes[i] = (byte) (random.nextInt(26) + 65); + } + if (arguments.partitions != null) { + PulsarAdminBuilder clientBuilder = PulsarAdmin.builder() + .serviceHttpUrl(arguments.adminURL); + try (PulsarAdmin client = clientBuilder.build()) { + for (String topic : arguments.producerTopic) { + log.info("Creating produce partitioned topic {} with {} partitions", topic, arguments.partitions); + try { + client.topics().createPartitionedTopic(topic, arguments.partitions); + } catch (PulsarAdminException.ConflictException alreadyExists) { + if (log.isDebugEnabled()) { + log.debug("Topic {} already exists: {}", topic, alreadyExists); + } + PartitionedTopicMetadata partitionedTopicMetadata = + client.topics().getPartitionedTopicMetadata(topic); + if (partitionedTopicMetadata.partitions != arguments.partitions) { + log.error( + "Topic {} already exists but it has a wrong number of partitions: {}, expecting {}", + topic, partitionedTopicMetadata.partitions, arguments.partitions); + PerfClientUtils.exit(-1); + } + } + } + } + } + + PulsarClient client = + PulsarClient.builder().enableTransaction(!arguments.isDisableTransaction) + .serviceUrl(arguments.serviceURL) + .connectionsPerBroker(arguments.maxConnections) + .statsInterval(0, TimeUnit.SECONDS) + .ioThreads(arguments.ioThreads) + .build(); + + ExecutorService executorService = new ThreadPoolExecutor(arguments.numTestThreads, + arguments.numTestThreads, + 0L, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>()); + + + long startTime = System.nanoTime(); + long testEndTime = startTime + (long) (arguments.testTime * 1e9); + Runtime.getRuntime().addShutdownHook(new Thread(() -> { + if (!arguments.isDisableTransaction) { + 
printTxnAggregatedThroughput(startTime); + } else { + printAggregatedThroughput(startTime); + } + printAggregatedStats(); + })); + + // start perf test + AtomicBoolean executing = new AtomicBoolean(true); + + RateLimiter rateLimiter = arguments.openTxnRate > 0 + ? RateLimiter.create(arguments.openTxnRate) + : null; + for(int i = 0; i < arguments.numTestThreads; i++) { + executorService.submit(() -> { + //The producer and consumer clients are built in advance, and then this thread is + //responsible for the production and consumption tasks of the transaction through the loop. + //A thread may perform tasks of multiple transactions in a traversing manner. + List> producers = null; + List>> consumers = null; + AtomicReference atomicReference = null; + try { + producers = buildProducers(client, arguments); + consumers = buildConsumer(client, arguments); + if (!arguments.isDisableTransaction) { + atomicReference = new AtomicReference<>(client.newTransaction() + .withTransactionTimeout(arguments.transactionTimeout, TimeUnit.SECONDS) + .build() + .get()); + } else { + atomicReference = new AtomicReference<>(null); + } + } catch (Exception e) { + log.error("Failed to build Producer/Consumer with exception : ", e); + executorService.shutdownNow(); + PerfClientUtils.exit(-1); + } + //The while loop has no break, and finally ends the execution through the shutdownNow of + //the executorService + while (true) { + if (arguments.numTransactions > 0) { + if (totalNumTxnOpenTxnFail.sum() + + totalNumTxnOpenTxnSuccess.sum() >= arguments.numTransactions) { + if (totalNumEndTxnOpFailed.sum() + + totalNumEndTxnOpSuccess.sum() < arguments.numTransactions ) { + continue; + } + log.info("------------------- DONE -----------------------"); + executing.compareAndSet(true, false); + executorService.shutdownNow(); + PerfClientUtils.exit(0); + break; + } + } + if (arguments.testTime > 0) { + if (System.nanoTime() > testEndTime) { + log.info("------------------- DONE -----------------------"); 
+ executing.compareAndSet(true, false); + executorService.shutdownNow(); + PerfClientUtils.exit(0); + break; + } + } + Transaction transaction = atomicReference.get(); + for (List> subscriptions : consumers) { + for (Consumer consumer : subscriptions) { + for (int j = 0; j < arguments.numMessagesReceivedPerTransaction; j++) { + Message message = null; + try { + message = consumer.receive(); + } catch (PulsarClientException e) { + log.error("Receive message failed", e); + executorService.shutdownNow(); + PerfClientUtils.exit(-1); + } + long receiveTime = System.nanoTime(); + if (!arguments.isDisableTransaction) { + consumer.acknowledgeAsync(message.getMessageId(), transaction) + .thenRun(() -> { + long latencyMicros = NANOSECONDS.toMicros( + System.nanoTime() - receiveTime); + messageAckRecorder.recordValue(latencyMicros); + messageAckCumulativeRecorder.recordValue(latencyMicros); + numMessagesAckSuccess.increment(); + }).exceptionally(exception -> { + if (exception instanceof InterruptedException && !executing.get()) { + return null; + } + log.error( + "Ack message failed with transaction {} throw exception", + transaction, exception); + numMessagesAckFailed.increment(); + return null; + }); + } else { + consumer.acknowledgeAsync(message).thenRun(() -> { + long latencyMicros = NANOSECONDS.toMicros( + System.nanoTime() - receiveTime); + messageAckRecorder.recordValue(latencyMicros); + messageAckCumulativeRecorder.recordValue(latencyMicros); + numMessagesAckSuccess.increment(); + }).exceptionally(exception -> { + if (exception instanceof InterruptedException && !executing.get()) { + return null; + } + log.error( + "Ack message failed with transaction {} throw exception", + transaction, exception); + numMessagesAckFailed.increment(); + return null; + }); + } + } + } + } + + for(Producer producer : producers){ + for (int j = 0; j < arguments.numMessagesProducedPerTransaction; j++) { + long sendTime = System.nanoTime(); + if (!arguments.isDisableTransaction) { + 
producer.newMessage(transaction).value(payloadBytes) + .sendAsync().thenRun(() -> { + long latencyMicros = NANOSECONDS.toMicros( + System.nanoTime() - sendTime); + messageSendRecorder.recordValue(latencyMicros); + messageSendRCumulativeRecorder.recordValue(latencyMicros); + numMessagesSendSuccess.increment(); + }).exceptionally(exception -> { + if (exception instanceof InterruptedException && ! executing.get()) { + return null; + } + log.error("Send transaction message failed with exception : ", exception); + numMessagesSendFailed.increment(); + return null; + }); + } else { + producer.newMessage().value(payloadBytes) + .sendAsync().thenRun(() -> { + long latencyMicros = NANOSECONDS.toMicros( + System.nanoTime() - sendTime); + messageSendRecorder.recordValue(latencyMicros); + messageSendRCumulativeRecorder.recordValue(latencyMicros); + numMessagesSendSuccess.increment(); + }).exceptionally(exception -> { + if (exception instanceof InterruptedException && ! executing.get()) { + return null; + } + log.error("Send message failed with exception : ", exception); + numMessagesSendFailed.increment(); + return null; + }); + } + } + } + + if (rateLimiter != null){ + rateLimiter.tryAcquire(); + } + if (!arguments.isDisableTransaction) { + if (!arguments.isAbortTransaction) { + transaction.commit() + .thenRun(() -> { + numTxnOpSuccess.increment(); + totalNumEndTxnOpSuccess.increment(); + }).exceptionally(exception -> { + if (exception instanceof InterruptedException && ! executing.get()) { + return null; + } + log.error("Commit transaction {} failed with exception", + transaction.getTxnID().toString(), + exception); + totalNumEndTxnOpFailed.increment(); + return null; + }); + } else { + transaction.abort().thenRun(() -> { + numTxnOpSuccess.increment(); + totalNumEndTxnOpSuccess.increment(); + }).exceptionally(exception -> { + if (exception instanceof InterruptedException && ! 
executing.get()) { + return null; + } + log.error("Commit transaction {} failed with exception", + transaction.getTxnID().toString(), + exception); + totalNumEndTxnOpFailed.increment(); + return null; + }); + } + while (true) { + try{ + Transaction newTransaction = client.newTransaction() + .withTransactionTimeout(arguments.transactionTimeout, TimeUnit.SECONDS) + .build() + .get(); + atomicReference.compareAndSet(transaction, newTransaction); + totalNumTxnOpenTxnSuccess.increment(); + break; + }catch (Exception throwable){ + if (throwable instanceof InterruptedException && !executing.get()) { + break; + } + log.error("Failed to new transaction with exception: ", throwable); + totalNumTxnOpenTxnFail.increment(); + } + } + } else { + totalNumTxnOpenTxnSuccess.increment(); + totalNumEndTxnOpSuccess.increment(); + numTxnOpSuccess.increment(); + } + } + }); + } + + + + // Print report stats + long oldTime = System.nanoTime(); + + Histogram reportSendHistogram = null; + Histogram reportAckHistogram = null; + + String statsFileName = "perf-transaction-" + System.currentTimeMillis() + ".hgrm"; + log.info("Dumping latency stats to {}", statsFileName); + + PrintStream histogramLog = new PrintStream(new FileOutputStream(statsFileName), false); + HistogramLogWriter histogramLogWriter = new HistogramLogWriter(histogramLog); + + // Some log header bits + histogramLogWriter.outputLogFormatVersion(); + histogramLogWriter.outputLegend(); + + while (executing.get()) { + try { + Thread.sleep(10000); + } catch (InterruptedException e) { + break; + } + long now = System.nanoTime(); + double elapsed = (now - oldTime) / 1e9; + long total = totalNumEndTxnOpFailed.sum() + totalNumTxnOpenTxnSuccess.sum(); + double rate = numTxnOpSuccess.sumThenReset() / elapsed; + reportSendHistogram = messageSendRecorder.getIntervalHistogram(reportSendHistogram); + reportAckHistogram = messageAckRecorder.getIntervalHistogram(reportAckHistogram); + String txnOrTaskLog = !arguments.isDisableTransaction + ? 
"Throughput transaction: {} transaction executes --- {} transaction/s" + : "Throughput task: {} task executes --- {} task/s"; + log.info( + txnOrTaskLog + " --- send Latency: mean: {} ms - med: {} " + + "- 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}" + " --- ack Latency: " + + "mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}", + intFormat.format(total), + dec.format(rate), + dec.format(reportSendHistogram.getMean() / 1000.0), + dec.format(reportSendHistogram.getValueAtPercentile(50) / 1000.0), + dec.format(reportSendHistogram.getValueAtPercentile(95) / 1000.0), + dec.format(reportSendHistogram.getValueAtPercentile(99) / 1000.0), + dec.format(reportSendHistogram.getValueAtPercentile(99.9) / 1000.0), + dec.format(reportSendHistogram.getValueAtPercentile(99.99) / 1000.0), + dec.format(reportSendHistogram.getMaxValue() / 1000.0), + dec.format(reportAckHistogram.getMean() / 1000.0), + dec.format(reportAckHistogram.getValueAtPercentile(50) / 1000.0), + dec.format(reportAckHistogram.getValueAtPercentile(95) / 1000.0), + dec.format(reportAckHistogram.getValueAtPercentile(99) / 1000.0), + dec.format(reportAckHistogram.getValueAtPercentile(99.9) / 1000.0), + dec.format(reportAckHistogram.getValueAtPercentile(99.99) / 1000.0), + dec.format(reportAckHistogram.getMaxValue() / 1000.0)); + + histogramLogWriter.outputIntervalHistogram(reportSendHistogram); + histogramLogWriter.outputIntervalHistogram(reportAckHistogram); + reportSendHistogram.reset(); + reportAckHistogram.reset(); + + oldTime = now; + } + + + } + + + private static void printTxnAggregatedThroughput(long start) { + double elapsed = (System.nanoTime() - start) / 1e9; + long numTransactionEndFailed = totalNumEndTxnOpFailed.sum(); + long numTransactionEndSuccess = totalNumEndTxnOpSuccess.sum(); + long total = numTransactionEndFailed + numTransactionEndSuccess; + double rate = total / elapsed; + long numMessageAckFailed = numMessagesAckFailed.sum(); + long 
numMessageAckSuccess = numMessagesAckSuccess.sum(); + long numMessageSendFailed = numMessagesSendFailed.sum(); + long numMessageSendSuccess = numMessagesSendSuccess.sum(); + long numTransactionOpenFailed = totalNumTxnOpenTxnFail.sum(); + long numTransactionOpenSuccess = totalNumTxnOpenTxnSuccess.sum(); + + log.info( + "Aggregated throughput stats --- {} transaction executed --- {} transaction/s " + + " --- {} transaction open successfully --- {} transaction open failed" + + " --- {} transaction end successfully --- {} transaction end failed" + + " --- {} message ack failed --- {} message send failed" + + " --- {} message ack success --- {} message send success ", + total, + dec.format(rate), + numTransactionOpenSuccess, + numTransactionOpenFailed, + numTransactionEndSuccess, + numTransactionEndFailed, + numMessageAckFailed, + numMessageSendFailed, + numMessageAckSuccess, + numMessageSendSuccess); + + } + + private static void printAggregatedThroughput(long start) { + double elapsed = (System.nanoTime() - start) / 1e9; + long total = totalNumEndTxnOpFailed.sum() + totalNumEndTxnOpSuccess.sum(); + double rate = total / elapsed; + long numMessageAckFailed = numMessagesAckFailed.sum(); + long numMessageAckSuccess = numMessagesAckSuccess.sum(); + long numMessageSendFailed = numMessagesSendFailed.sum(); + long numMessageSendSuccess = numMessagesSendSuccess.sum(); + log.info( + "Aggregated throughput stats --- {} task executed --- {} task/s" + + " --- {} message ack failed --- {} message send failed" + + " --- {} message ack success --- {} message send success", + total, + totalFormat.format(rate), + numMessageAckFailed, + numMessageSendFailed, + numMessageAckSuccess, + numMessageSendSuccess); + } + + private static void printAggregatedStats() { + Histogram reportAckHistogram = messageAckCumulativeRecorder.getIntervalHistogram(); + Histogram reportSendHistogram = messageSendRCumulativeRecorder.getIntervalHistogram(); + log.info( + "Messages ack aggregated latency stats 
--- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - " + + "99.9pct: {} - " + + "99.99pct: {} - 99.999pct: {} - Max: {}", + dec.format(reportAckHistogram.getMean() / 1000.0), + dec.format(reportAckHistogram.getValueAtPercentile(50) / 1000.0), + dec.format(reportAckHistogram.getValueAtPercentile(95) / 1000.0), + dec.format(reportAckHistogram.getValueAtPercentile(99) / 1000.0), + dec.format(reportAckHistogram.getValueAtPercentile(99.9) / 1000.0), + dec.format(reportAckHistogram.getValueAtPercentile(99.99) / 1000.0), + dec.format(reportAckHistogram.getValueAtPercentile(99.999) / 1000.0), + dec.format(reportAckHistogram.getMaxValue() / 1000.0)); + log.info( + "Messages send aggregated latency stats --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - " + + "99.9pct: {} - " + + "99.99pct: {} - 99.999pct: {} - Max: {}", + dec.format(reportSendHistogram.getMean() / 1000.0), + dec.format(reportSendHistogram.getValueAtPercentile(50) / 1000.0), + dec.format(reportSendHistogram.getValueAtPercentile(95) / 1000.0), + dec.format(reportSendHistogram.getValueAtPercentile(99) / 1000.0), + dec.format(reportSendHistogram.getValueAtPercentile(99.9) / 1000.0), + dec.format(reportSendHistogram.getValueAtPercentile(99.99) / 1000.0), + dec.format(reportSendHistogram.getValueAtPercentile(99.999) / 1000.0), + dec.format(reportSendHistogram.getMaxValue() / 1000.0)); + } + + + + static final DecimalFormat dec = new PaddingDecimalFormat("0.000", 7); + static final DecimalFormat intFormat = new PaddingDecimalFormat("0", 7); + static final DecimalFormat totalFormat = new DecimalFormat("0.000"); + private static final Logger log = LoggerFactory.getLogger(PerformanceProducer.class); + + + private static List>> buildConsumer(PulsarClient client, Arguments arguments) + throws ExecutionException, InterruptedException { + ConsumerBuilder consumerBuilder = client.newConsumer(Schema.BYTES) // + .subscriptionType(arguments.subscriptionType) + 
.receiverQueueSize(arguments.receiverQueueSize) + .subscriptionInitialPosition(arguments.subscriptionInitialPosition); + + Iterator consumerTopicsIterator = arguments.consumerTopic.iterator(); + List>> consumers = Lists.newArrayListWithCapacity(arguments.consumerTopic.size()); + while(consumerTopicsIterator.hasNext()){ + String topic = consumerTopicsIterator.next(); + final List> subscriptions = Lists.newArrayListWithCapacity(arguments.numSubscriptions); + final List>> subscriptionFutures = + Lists.newArrayListWithCapacity(arguments.numSubscriptions); + log.info("Create subscriptions for topic {}", topic); + for (int j = 0; j < arguments.numSubscriptions; j++) { + String subscriberName = arguments.subscriptions.get(j); + subscriptionFutures + .add(consumerBuilder.clone().topic(topic).subscriptionName(subscriberName) + .subscribeAsync()); + } + for (Future> future : subscriptionFutures) { + subscriptions.add(future.get()); + } + consumers.add(subscriptions); + } + return consumers; + } + + private static List> buildProducers(PulsarClient client, Arguments arguments) + throws ExecutionException, InterruptedException { + + ProducerBuilder producerBuilder = client.newProducer(Schema.BYTES) + .sendTimeout(0, TimeUnit.SECONDS); + + final List>> producerFutures = Lists.newArrayList(); + for (String topic : arguments.producerTopic) { + log.info("Create producer for topic {}", topic); + producerFutures.add(producerBuilder.clone().topic(topic).createAsync()); + } + final List> producers = Lists.newArrayListWithCapacity(producerFutures.size()); + + for (Future> future : producerFutures) { + producers.add(future.get()); + } + return producers; + } + +} diff --git a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceTransactionTest.java b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceTransactionTest.java new file mode 100644 index 0000000000000..a08fbe09f62e0 --- /dev/null +++ 
b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceTransactionTest.java @@ -0,0 +1,239 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.testclient; + +import com.google.common.collect.Sets; +import java.net.URL; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.ServiceConfiguration; +import org.apache.pulsar.broker.auth.MockedPulsarServiceBaseTest; +import org.apache.pulsar.client.api.Consumer; +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.Producer; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Schema; +import org.apache.pulsar.client.api.SubscriptionInitialPosition; +import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.common.naming.NamespaceName; +import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.policies.data.ClusterData; +import org.apache.pulsar.common.policies.data.TenantInfoImpl; +import org.testng.Assert; +import 
org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.testng.Assert.fail; + +@Slf4j +public class PerformanceTransactionTest extends MockedPulsarServiceBaseTest { + private final String testTenant = "pulsar"; + private final String testNamespace = "perf"; + private final String myNamespace = testTenant + "/" + testNamespace; + private final String testTopic = "persistent://" + myNamespace + "/test-"; + private final AtomicInteger lastExitCode = new AtomicInteger(0); + + @BeforeMethod + @Override + protected void setup() throws Exception { + ServiceConfiguration serviceConfiguration = getDefaultConf(); + serviceConfiguration.setSystemTopicEnabled(true); + serviceConfiguration.setTransactionCoordinatorEnabled(true); + super.internalSetup(serviceConfiguration); + PerfClientUtils.setExitProcedure(code -> { + log.error("JVM exit code is {}", code); + if (code != 0) { + throw new RuntimeException("JVM should exit with code " + code); + } + }); + // Setup namespaces + admin.clusters().createCluster("test", ClusterData.builder().serviceUrl(pulsar.getWebServiceAddress()).build()); + admin.tenants().createTenant(NamespaceName.SYSTEM_NAMESPACE.getTenant(), + new TenantInfoImpl(Sets.newHashSet("appid1"), Sets.newHashSet("test"))); + admin.namespaces().createNamespace(myNamespace, Sets.newHashSet("test")); + admin.namespaces().createNamespace(NamespaceName.SYSTEM_NAMESPACE.toString()); + admin.topics().createPartitionedTopic(TopicName.TRANSACTION_COORDINATOR_ASSIGN.toString(), 1); + } + + @AfterMethod(alwaysRun = true) + @Override + protected void cleanup() throws Exception { + super.internalCleanup(); + int exitCode = lastExitCode.get(); + if (exitCode != 0) { + fail("Unexpected JVM exit code "+exitCode); + } + } + + @Test + public void testTxnPerf() throws Exception { + String 
argString = "--topics-c %s --topics-p %s -threads 1 -ntxn 50 -u %s -ss %s -np 1 -au %s"; + String testConsumeTopic = testTopic + UUID.randomUUID(); + String testProduceTopic = testTopic + UUID.randomUUID(); + String testSub = "testSub"; + admin.topics().createPartitionedTopic(testConsumeTopic, 1); + String args = String.format(argString, testConsumeTopic, testProduceTopic, + pulsar.getBrokerServiceUrl(), testSub, new URL(pulsar.getWebServiceAddress())); + + PulsarClient pulsarClient = PulsarClient.builder() + .enableTransaction(true) + .serviceUrl(pulsar.getBrokerServiceUrl()) + .connectionsPerBroker(100) + .statsInterval(0, TimeUnit.SECONDS) + .build(); + Producer produceToConsumeTopic = pulsarClient.newProducer(Schema.BYTES) + .producerName("perf-transaction-producer") + .sendTimeout(0, TimeUnit.SECONDS) + .topic(testConsumeTopic) + .create(); + pulsarClient.newConsumer(Schema.BYTES) + .consumerName("perf-transaction-consumeVerify") + .topic(testConsumeTopic) + .subscriptionType(SubscriptionType.Shared) + .subscriptionName(testSub + "pre") + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + CountDownLatch countDownLatch = new CountDownLatch(50); + for (int i = 0; i < 50 + ; i++) { + produceToConsumeTopic.newMessage().value(("testConsume " + i).getBytes()).sendAsync().thenRun( + countDownLatch::countDown); + } + + countDownLatch.await(); + + Thread thread = new Thread(() -> { + try { + PerformanceTransaction.main(args.split(" ")); + } catch (Exception e) { + e.printStackTrace(); + } + }); + thread.start(); + thread.join(); + Consumer consumeFromConsumeTopic = pulsarClient.newConsumer(Schema.BYTES) + .consumerName("perf-transaction-consumeVerify") + .topic(testConsumeTopic) + .subscriptionType(SubscriptionType.Shared) + .subscriptionName(testSub) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + Consumer consumeFromProduceTopic = pulsarClient.newConsumer(Schema.BYTES) + 
.consumerName("perf-transaction-produceVerify") + .topic(testProduceTopic) + .subscriptionName(testSub) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + for (int i = 0; i < 50; i++) { + Message message = consumeFromProduceTopic.receive(2, TimeUnit.SECONDS); + Assert.assertNotNull(message); + consumeFromProduceTopic.acknowledge(message); + } + Message message = consumeFromConsumeTopic.receive(2, TimeUnit.SECONDS); + Assert.assertNull(message); + message = consumeFromProduceTopic.receive(2, TimeUnit.SECONDS); + Assert.assertNull(message); + } + + + @Test + public void testProduceTxnMessage() throws InterruptedException, PulsarClientException { + String argString = "%s -r 10 -u %s -m %d -txn"; + String topic = testTopic + UUID.randomUUID(); + int totalMessage = 100; + String args = String.format(argString, topic, pulsar.getBrokerServiceUrl(), totalMessage); + pulsarClient.newConsumer().subscriptionName("subName" + "pre").topic(topic) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionType(SubscriptionType.Exclusive) + .enableBatchIndexAcknowledgment(false) + .subscribe(); + Thread thread = new Thread(() -> { + try { + log.info(""); + PerformanceProducer.main(args.split(" ")); + } catch (Exception e) { + e.printStackTrace(); + } + }); + thread.start(); + thread.join(); + Consumer consumer = pulsarClient.newConsumer().subscriptionName("subName").topic(topic) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionType(SubscriptionType.Exclusive) + .enableBatchIndexAcknowledgment(false) + .subscribe(); + for (int i = 0; i < totalMessage; i++) { + Message message = consumer.receive(2, TimeUnit.SECONDS); + Assert.assertNotNull(message); + consumer.acknowledge(message); + } + Message message = consumer.receive(2, TimeUnit.SECONDS); + Assert.assertNull(message); + } + + @Test + public void testConsumeTxnMessage() throws InterruptedException, PulsarClientException { + String 
argString = "%s -r 10 -u %s -txn -ss %s -st %s -sp %s -ntxn %d"; + String subName = "sub"; + String topic = testTopic + UUID.randomUUID(); + String args = String.format(argString, topic, pulsar.getBrokerServiceUrl(), subName, + SubscriptionType.Exclusive, SubscriptionInitialPosition.Earliest, 10); + Producer producer = pulsarClient.newProducer().topic(topic).sendTimeout(0, TimeUnit.SECONDS) + .create(); + pulsarClient.newConsumer(Schema.BYTES) + .consumerName("perf-transaction-consumeVerify") + .topic(topic) + .subscriptionType(SubscriptionType.Shared) + .subscriptionName(subName + "pre") + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscribe(); + for (int i = 0; i < 505; i++) { + producer.newMessage().value("messages for test transaction consumer".getBytes()).send(); + } + Thread thread = new Thread(() -> { + try { + log.info(""); + PerformanceConsumer.main(args.split(" ")); + } catch (Exception e) { + e.printStackTrace(); + } + }); + thread.start(); + thread.join(); + Consumer consumer = pulsarClient.newConsumer().subscriptionName(subName).topic(topic) + .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest) + .subscriptionType(SubscriptionType.Exclusive) + .enableBatchIndexAcknowledgment(false) + .subscribe(); + for (int i = 0; i < 5; i++) { + Message message = consumer.receive(2, TimeUnit.SECONDS); + Assert.assertNotNull(message); + } + Message message = consumer.receive(2, TimeUnit.SECONDS); + Assert.assertNull(message); + } + +} diff --git a/pulsar-transaction/common/pom.xml b/pulsar-transaction/common/pom.xml index b3b0e3d369a07..7b60179835259 100644 --- a/pulsar-transaction/common/pom.xml +++ b/pulsar-transaction/common/pom.xml @@ -27,7 +27,7 @@ org.apache.pulsar pulsar-transaction-parent - 2.9.0-SNAPSHOT + 2.9.3 pulsar-transaction-common diff --git a/pulsar-transaction/coordinator/pom.xml b/pulsar-transaction/coordinator/pom.xml index b5bedab65452f..c785b8c6df9c2 100644 --- a/pulsar-transaction/coordinator/pom.xml +++ 
b/pulsar-transaction/coordinator/pom.xml @@ -27,7 +27,7 @@ org.apache.pulsar pulsar-transaction-parent - 2.9.0-SNAPSHOT + 2.9.3 pulsar-transaction-coordinator diff --git a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/TransactionMetadataStoreState.java b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/TransactionMetadataStoreState.java index 5dbd9ba6dcb9d..8947413191cf6 100644 --- a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/TransactionMetadataStoreState.java +++ b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/TransactionMetadataStoreState.java @@ -33,6 +33,7 @@ public enum State { None, Initializing, Ready, + Closing, Close } @@ -55,10 +56,14 @@ protected boolean changeToInitializingState() { return STATE_UPDATER.compareAndSet(this, State.None, State.Initializing); } + protected boolean changeToClosingState() { + return (STATE_UPDATER.compareAndSet(this, State.Ready, State.Closing) + || STATE_UPDATER.compareAndSet(this, State.None, State.Closing) + || STATE_UPDATER.compareAndSet(this, State.Initializing, State.Closing)); + } + protected boolean changeToCloseState() { - return (STATE_UPDATER.compareAndSet(this, State.Ready, State.Close) - || STATE_UPDATER.compareAndSet(this, State.None, State.Close) - || STATE_UPDATER.compareAndSet(this, State.Initializing, State.Close)); + return STATE_UPDATER.compareAndSet(this, State.Closing, State.Close); } protected boolean checkIfReady() { @@ -68,4 +73,4 @@ protected boolean checkIfReady() { public State getState() { return STATE_UPDATER.get(this); } -} \ No newline at end of file +} diff --git a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogImpl.java b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogImpl.java index f2324afec82ee..1727f477be82e 
100644 --- a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogImpl.java +++ b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogImpl.java @@ -31,8 +31,6 @@ import org.apache.bookkeeper.mledger.ManagedLedgerException.ManagedLedgerAlreadyClosedException; import org.apache.bookkeeper.mledger.ManagedLedgerFactory; import org.apache.bookkeeper.mledger.Position; -import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl; -import org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.State; import org.apache.bookkeeper.mledger.impl.PositionImpl; import org.apache.pulsar.common.allocator.PulsarByteBufAllocator; import org.apache.pulsar.common.api.proto.CommandSubscribe; @@ -68,21 +66,21 @@ public class MLTransactionLogImpl implements TransactionLog { private final TopicName topicName; - private final MLTransactionLogInterceptor mlTransactionLogInterceptor; - public MLTransactionLogImpl(TransactionCoordinatorID tcID, ManagedLedgerFactory managedLedgerFactory, ManagedLedgerConfig managedLedgerConfig) { - this.topicName = TopicName.get(TopicDomain.persistent.value(), - NamespaceName.SYSTEM_NAMESPACE, TRANSACTION_LOG_PREFIX + tcID.getId()); + this.topicName = getMLTransactionLogName(tcID); this.tcId = tcID.getId(); - this.mlTransactionLogInterceptor = new MLTransactionLogInterceptor(); - managedLedgerConfig.setManagedLedgerInterceptor(this.mlTransactionLogInterceptor); this.managedLedgerFactory = managedLedgerFactory; this.managedLedgerConfig = managedLedgerConfig; this.entryQueue = new SpscArrayQueue<>(2000); } + public static TopicName getMLTransactionLogName(TransactionCoordinatorID tcID) { + return TopicName.get(TopicDomain.persistent.value(), + NamespaceName.SYSTEM_NAMESPACE, TRANSACTION_LOG_PREFIX + tcID.getId()); + } + @Override public CompletableFuture initialize() { CompletableFuture future = new CompletableFuture<>(); @@ -157,16 +155,13 @@ public 
CompletableFuture append(TransactionMetadataEntry transactionMe @Override public void addComplete(Position position, ByteBuf entryData, Object ctx) { buf.release(); - mlTransactionLogInterceptor.setMaxLocalTxnId(transactionMetadataEntry.getMaxLocalTxnId()); completableFuture.complete(position); } @Override public void addFailed(ManagedLedgerException exception, Object ctx) { log.error("Transaction log write transaction operation error", exception); - if (exception instanceof ManagedLedgerAlreadyClosedException - && managedLedger instanceof ManagedLedgerImpl - && State.WriteFailed == ((ManagedLedgerImpl) managedLedger).getState()) { + if (exception instanceof ManagedLedgerAlreadyClosedException) { managedLedger.readyToCreateNewLedger(); } buf.release(); @@ -238,57 +233,24 @@ public void start() { } } - public CompletableFuture getMaxLocalTxnId() { - - CompletableFuture completableFuture = new CompletableFuture<>(); - PositionImpl position = (PositionImpl) managedLedger.getLastConfirmedEntry(); - - if (position != null && position.getEntryId() != -1 - && ((ManagedLedgerImpl) managedLedger).ledgerExists(position.getLedgerId())) { - ((ManagedLedgerImpl) this.managedLedger).asyncReadEntry(position, new AsyncCallbacks.ReadEntryCallback() { - @Override - public void readEntryComplete(Entry entry, Object ctx) { - TransactionMetadataEntry lastConfirmEntry = new TransactionMetadataEntry(); - ByteBuf buffer = entry.getDataBuffer(); - lastConfirmEntry.parseFrom(buffer, buffer.readableBytes()); - completableFuture.complete(lastConfirmEntry.getMaxLocalTxnId()); - } - - @Override - public void readEntryFailed(ManagedLedgerException exception, Object ctx) { - log.error("[{}] MLTransactionLog recover MaxLocalTxnId fail!", topicName, exception); - completableFuture.completeExceptionally(exception); - } - }, null); - } else if (managedLedger.getProperties() - .get(MLTransactionLogInterceptor.MAX_LOCAL_TXN_ID) != null) { - 
completableFuture.complete(Long.parseLong(managedLedger.getProperties() - .get(MLTransactionLogInterceptor.MAX_LOCAL_TXN_ID))); - } else { - log.error("[{}] MLTransactionLog recover MaxLocalTxnId fail! " - + "not found MaxLocalTxnId in managedLedger and properties", topicName); - completableFuture.completeExceptionally(new ManagedLedgerException(topicName - + "MLTransactionLog recover MaxLocalTxnId fail! " - + "not found MaxLocalTxnId in managedLedger and properties")); - } - return completableFuture; - } - class FillEntryQueueCallback implements AsyncCallbacks.ReadEntriesCallback { private final AtomicLong outstandingReadsRequests = new AtomicLong(0); + private volatile boolean isReadable = true; + private static final int NUMBER_OF_PER_READ_ENTRY = 100; boolean fillQueue() { - if (entryQueue.size() < entryQueue.capacity() && outstandingReadsRequests.get() == 0) { + if (entryQueue.size() + NUMBER_OF_PER_READ_ENTRY < entryQueue.capacity() + && outstandingReadsRequests.get() == 0) { if (cursor.hasMoreEntries()) { outstandingReadsRequests.incrementAndGet(); - readAsync(100, this); - return true; + readAsync(NUMBER_OF_PER_READ_ENTRY, this); + return isReadable; } else { return false; } } else { - return true; + return isReadable; } } @@ -309,8 +271,15 @@ public Entry get() { @Override public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { + if (managedLedgerConfig.isAutoSkipNonRecoverableData() + && exception instanceof ManagedLedgerException.NonRecoverableLedgerException + || exception instanceof ManagedLedgerException.ManagedLedgerFencedException + || exception instanceof ManagedLedgerException.CursorAlreadyClosedException) { + isReadable = false; + } else { + outstandingReadsRequests.decrementAndGet(); + } log.error("Transaction log init fail error!", exception); - outstandingReadsRequests.decrementAndGet(); } } diff --git 
a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogInterceptor.java b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogInterceptor.java deleted file mode 100644 index e97b104e273e1..0000000000000 --- a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionLogInterceptor.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.pulsar.transaction.coordinator.impl; - -import org.apache.bookkeeper.client.LedgerHandle; -import org.apache.bookkeeper.mledger.impl.OpAddEntry; -import org.apache.bookkeeper.mledger.intercept.ManagedLedgerInterceptor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Map; -import java.util.concurrent.CompletableFuture; - -/** - * Store max sequenceID in ManagedLedger properties, in order to recover transaction log. 
- */ -public class MLTransactionLogInterceptor implements ManagedLedgerInterceptor { - - private static final Logger log = LoggerFactory.getLogger(MLTransactionLogInterceptor.class); - public static final String MAX_LOCAL_TXN_ID = "max_local_txn_id"; - - private volatile long maxLocalTxnId = -1; - - @Override - public OpAddEntry beforeAddEntry(OpAddEntry op, int numberOfMessages) { - return null; - } - - @Override - public void onManagedLedgerPropertiesInitialize(Map propertiesMap) { - - } - - @Override - public CompletableFuture onManagedLedgerLastLedgerInitialize(String name, LedgerHandle ledgerHandle) { - return CompletableFuture.completedFuture(null); - } - - @Override - public void onUpdateManagedLedgerInfo(Map propertiesMap) { - propertiesMap.put(MAX_LOCAL_TXN_ID, maxLocalTxnId + ""); - } - - protected void setMaxLocalTxnId(long maxLocalTxnId) { - this.maxLocalTxnId = maxLocalTxnId; - } -} diff --git a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionMetadataStore.java b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionMetadataStore.java index 05faaad1aa002..6c88d27cc2298 100644 --- a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionMetadataStore.java +++ b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionMetadataStore.java @@ -18,18 +18,23 @@ */ package org.apache.pulsar.transaction.coordinator.impl; +import com.google.common.util.concurrent.MoreExecutors; +import io.netty.util.concurrent.DefaultThreadFactory; +import java.time.Duration; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.NoSuchElementException; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.atomic.AtomicLong; +import 
java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.atomic.LongAdder; import org.apache.bookkeeper.mledger.ManagedLedger; import org.apache.bookkeeper.mledger.Position; import org.apache.commons.lang3.tuple.MutablePair; import org.apache.commons.lang3.tuple.Pair; +import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClientException; import org.apache.pulsar.client.api.transaction.TxnID; import org.apache.pulsar.common.api.proto.Subscription; import org.apache.pulsar.common.policies.data.TransactionCoordinatorStats; @@ -60,9 +65,7 @@ public class MLTransactionMetadataStore private static final Logger log = LoggerFactory.getLogger(MLTransactionMetadataStore.class); private final TransactionCoordinatorID tcID; - private final AtomicLong sequenceId = new AtomicLong(TC_ID_NOT_USED); private final MLTransactionLogImpl transactionLog; - private static final long TC_ID_NOT_USED = -1L; private final ConcurrentSkipListMap>> txnMetaMap = new ConcurrentSkipListMap<>(); private final TransactionTimeoutTracker timeoutTracker; private final TransactionMetadataStoreStats transactionMetadataStoreStats; @@ -71,12 +74,15 @@ public class MLTransactionMetadataStore private final LongAdder abortedTransactionCount; private final LongAdder transactionTimeoutCount; private final LongAdder appendLogCount; + private final MLTransactionSequenceIdGenerator sequenceIdGenerator; + private final ExecutorService internalPinnedExecutor; public MLTransactionMetadataStore(TransactionCoordinatorID tcID, MLTransactionLogImpl mlTransactionLog, TransactionTimeoutTracker timeoutTracker, - TransactionRecoverTracker recoverTracker) { + MLTransactionSequenceIdGenerator sequenceIdGenerator) { super(State.None); + this.sequenceIdGenerator = sequenceIdGenerator; this.tcID = tcID; this.transactionLog = mlTransactionLog; this.timeoutTracker = timeoutTracker; @@ -87,97 +93,111 @@ public MLTransactionMetadataStore(TransactionCoordinatorID tcID, 
this.abortedTransactionCount = new LongAdder(); this.transactionTimeoutCount = new LongAdder(); this.appendLogCount = new LongAdder(); + DefaultThreadFactory threadFactory = new DefaultThreadFactory("transaction_coordinator_" + + tcID.toString() + "thread_factory"); + this.internalPinnedExecutor = Executors.newSingleThreadScheduledExecutor(threadFactory); + } + public CompletableFuture init(TransactionRecoverTracker recoverTracker) { + CompletableFuture completableFuture = new CompletableFuture<>(); if (!changeToInitializingState()) { log.error("Managed ledger transaction metadata store change state error when init it"); - return; - } - new Thread(() -> transactionLog.replayAsync(new TransactionLogReplayCallback() { + completableFuture + .completeExceptionally(new TransactionCoordinatorClientException + .CoordinatorNotFoundException("transaction metadata store with tcId " + + tcID.toString() + " change state to Initializing error when init it")); + } else { + internalPinnedExecutor.execute(() -> transactionLog.replayAsync(new TransactionLogReplayCallback() { - @Override - public void replayComplete() { - mlTransactionLog.getMaxLocalTxnId().thenAccept(id -> { + @Override + public void replayComplete() { recoverTracker.appendOpenTransactionToTimeoutTracker(); - sequenceId.set(id); if (!changeToReadyState()) { log.error("Managed ledger transaction metadata store change state error when replay complete"); + completableFuture + .completeExceptionally(new TransactionCoordinatorClientException + .CoordinatorNotFoundException("transaction metadata store with tcId " + + tcID.toString() + " change state to Ready error when init it")); + } else { recoverTracker.handleCommittingAndAbortingTransaction(); timeoutTracker.start(); + completableFuture.complete(MLTransactionMetadataStore.this); } - }); - } - - @Override - public void handleMetadataEntry(Position position, TransactionMetadataEntry transactionMetadataEntry) { + } - try { + @Override + public void 
handleMetadataEntry(Position position, TransactionMetadataEntry transactionMetadataEntry) { - TxnID txnID = new TxnID(transactionMetadataEntry.getTxnidMostBits(), + try { + TxnID txnID = new TxnID(transactionMetadataEntry.getTxnidMostBits(), transactionMetadataEntry.getTxnidLeastBits()); - long transactionId = transactionMetadataEntry.getTxnidLeastBits(); - switch (transactionMetadataEntry.getMetadataOp()) { - case NEW: - long txnSequenceId = transactionMetadataEntry.getTxnidLeastBits(); - if (txnMetaMap.containsKey(transactionId)) { - txnMetaMap.get(transactionId).getRight().add(position); - } else { - List positions = new ArrayList<>(); - positions.add(position); - long openTimestamp = transactionMetadataEntry.getStartTime(); - long timeoutAt = transactionMetadataEntry.getTimeoutMs(); - txnMetaMap.put(transactionId, MutablePair.of(new TxnMetaImpl(txnID, - openTimestamp, timeoutAt), positions)); - recoverTracker.handleOpenStatusTransaction(txnSequenceId, timeoutAt + openTimestamp); - } - break; - case ADD_PARTITION: - if (!txnMetaMap.containsKey(transactionId)) { - transactionLog.deletePosition(Collections.singletonList(position)); - } else { - txnMetaMap.get(transactionId).getLeft() - .addProducedPartitions(transactionMetadataEntry.getPartitionsList()); - txnMetaMap.get(transactionId).getRight().add(position); - } - break; - case ADD_SUBSCRIPTION: - if (!txnMetaMap.containsKey(transactionId)) { - transactionLog.deletePosition(Collections.singletonList(position)); - } else { - txnMetaMap.get(transactionId).getLeft() - .addAckedPartitions(subscriptionToTxnSubscription( - transactionMetadataEntry.getSubscriptionsList())); - txnMetaMap.get(transactionId).getRight().add(position); - } - break; - case UPDATE: - if (!txnMetaMap.containsKey(transactionId)) { - transactionLog.deletePosition(Collections.singletonList(position)); - } else { - TxnStatus newStatus = transactionMetadataEntry.getNewStatus(); - txnMetaMap.get(transactionId).getLeft() - 
.updateTxnStatus(transactionMetadataEntry.getNewStatus(), - transactionMetadataEntry.getExpectedStatus()); - txnMetaMap.get(transactionId).getRight().add(position); - recoverTracker.updateTransactionStatus(txnID.getLeastSigBits(), newStatus); - if (newStatus == TxnStatus.COMMITTED || newStatus == TxnStatus.ABORTED) { - transactionLog.deletePosition(txnMetaMap - .get(transactionId).getRight()).thenAccept(v -> - txnMetaMap.remove(transactionId).getLeft()); + long transactionId = transactionMetadataEntry.getTxnidLeastBits(); + switch (transactionMetadataEntry.getMetadataOp()) { + case NEW: + long txnSequenceId = transactionMetadataEntry.getTxnidLeastBits(); + if (txnMetaMap.containsKey(transactionId)) { + txnMetaMap.get(transactionId).getRight().add(position); + } else { + List positions = new ArrayList<>(); + positions.add(position); + long openTimestamp = transactionMetadataEntry.getStartTime(); + long timeoutAt = transactionMetadataEntry.getTimeoutMs(); + txnMetaMap.put(transactionId, MutablePair.of(new TxnMetaImpl(txnID, + openTimestamp, timeoutAt), positions)); + recoverTracker.handleOpenStatusTransaction(txnSequenceId, + timeoutAt + openTimestamp); } - } - break; - default: - throw new InvalidTxnStatusException("Transaction `" - + txnID + "` load replay metadata operation " - + "from transaction log with unknown operation"); + break; + case ADD_PARTITION: + if (!txnMetaMap.containsKey(transactionId)) { + transactionLog.deletePosition(Collections.singletonList(position)); + } else { + txnMetaMap.get(transactionId).getLeft() + .addProducedPartitions(transactionMetadataEntry.getPartitionsList()); + txnMetaMap.get(transactionId).getRight().add(position); + } + break; + case ADD_SUBSCRIPTION: + if (!txnMetaMap.containsKey(transactionId)) { + transactionLog.deletePosition(Collections.singletonList(position)); + } else { + txnMetaMap.get(transactionId).getLeft() + .addAckedPartitions(subscriptionToTxnSubscription( + transactionMetadataEntry.getSubscriptionsList())); + 
txnMetaMap.get(transactionId).getRight().add(position); + } + break; + case UPDATE: + if (!txnMetaMap.containsKey(transactionId)) { + transactionLog.deletePosition(Collections.singletonList(position)); + } else { + TxnStatus newStatus = transactionMetadataEntry.getNewStatus(); + txnMetaMap.get(transactionId).getLeft() + .updateTxnStatus(transactionMetadataEntry.getNewStatus(), + transactionMetadataEntry.getExpectedStatus()); + txnMetaMap.get(transactionId).getRight().add(position); + recoverTracker.updateTransactionStatus(txnID.getLeastSigBits(), newStatus); + if (newStatus == TxnStatus.COMMITTED || newStatus == TxnStatus.ABORTED) { + transactionLog.deletePosition(txnMetaMap + .get(transactionId).getRight()).thenAccept(v -> + txnMetaMap.remove(transactionId).getLeft()); + } + } + break; + default: + throw new InvalidTxnStatusException("Transaction `" + + txnID + "` load replay metadata operation " + + "from transaction log with unknown operation"); + } + } catch (InvalidTxnStatusException e) { + transactionLog.deletePosition(Collections.singletonList(position)); + log.error(e.getMessage(), e); } - } catch (InvalidTxnStatusException e) { - transactionLog.deletePosition(Collections.singletonList(position)); - log.error(e.getMessage(), e); } - } - })).start(); + })); + } + return completableFuture; } @Override @@ -198,167 +218,202 @@ public CompletableFuture getTxnMeta(TxnID txnID) { } @Override - public synchronized CompletableFuture newTransaction(long timeOut) { - if (!checkIfReady()) { - return FutureUtil.failedFuture( - new CoordinatorException - .TransactionMetadataStoreStateException(tcID, State.Ready, getState(), "new Transaction")); - } + public CompletableFuture newTransaction(long timeOut) { + CompletableFuture completableFuture = new CompletableFuture<>(); + internalPinnedExecutor.execute(() -> { + if (!checkIfReady()) { + completableFuture.completeExceptionally(new CoordinatorException + .TransactionMetadataStoreStateException(tcID, State.Ready, 
getState(), "new Transaction")); + return; + } - long mostSigBits = tcID.getId(); - long leastSigBits = sequenceId.incrementAndGet(); - TxnID txnID = new TxnID(mostSigBits, leastSigBits); - long currentTimeMillis = System.currentTimeMillis(); - TransactionMetadataEntry transactionMetadataEntry = new TransactionMetadataEntry() - .setTxnidMostBits(mostSigBits) - .setTxnidLeastBits(leastSigBits) - .setStartTime(currentTimeMillis) - .setTimeoutMs(timeOut) - .setMetadataOp(TransactionMetadataEntry.TransactionMetadataOp.NEW) - .setLastModificationTime(currentTimeMillis) - .setMaxLocalTxnId(sequenceId.get()); - return transactionLog.append(transactionMetadataEntry) - .thenCompose(position -> { - appendLogCount.increment(); - TxnMeta txn = new TxnMetaImpl(txnID, currentTimeMillis, timeOut); - List positions = new ArrayList<>(); - positions.add(position); - Pair> pair = MutablePair.of(txn, positions); - txnMetaMap.put(leastSigBits, pair); - this.timeoutTracker.addTransaction(leastSigBits, timeOut); - createdTransactionCount.increment(); - return CompletableFuture.completedFuture(txnID); - }); + long mostSigBits = tcID.getId(); + long leastSigBits = sequenceIdGenerator.generateSequenceId(); + TxnID txnID = new TxnID(mostSigBits, leastSigBits); + long currentTimeMillis = System.currentTimeMillis(); + TransactionMetadataEntry transactionMetadataEntry = new TransactionMetadataEntry() + .setTxnidMostBits(mostSigBits) + .setTxnidLeastBits(leastSigBits) + .setStartTime(currentTimeMillis) + .setTimeoutMs(timeOut) + .setMetadataOp(TransactionMetadataEntry.TransactionMetadataOp.NEW) + .setLastModificationTime(currentTimeMillis) + .setMaxLocalTxnId(sequenceIdGenerator.getCurrentSequenceId()); + transactionLog.append(transactionMetadataEntry) + .whenComplete((position, throwable) -> { + if (throwable != null) { + completableFuture.completeExceptionally(throwable); + } else { + appendLogCount.increment(); + TxnMeta txn = new TxnMetaImpl(txnID, currentTimeMillis, timeOut); + List 
positions = new ArrayList<>(); + positions.add(position); + Pair> pair = MutablePair.of(txn, positions); + txnMetaMap.put(leastSigBits, pair); + this.timeoutTracker.addTransaction(leastSigBits, timeOut); + createdTransactionCount.increment(); + completableFuture.complete(txnID); + } + }); + }); + return completableFuture; } @Override - public synchronized CompletableFuture addProducedPartitionToTxn(TxnID txnID, List partitions) { - if (!checkIfReady()) { - return FutureUtil.failedFuture( - new CoordinatorException.TransactionMetadataStoreStateException(tcID, - State.Ready, getState(), "add produced partition")); - } - return getTxnPositionPair(txnID).thenCompose(txnMetaListPair -> { - TransactionMetadataEntry transactionMetadataEntry = new TransactionMetadataEntry() - .setTxnidMostBits(txnID.getMostSigBits()) - .setTxnidLeastBits(txnID.getLeastSigBits()) - .setMetadataOp(TransactionMetadataOp.ADD_PARTITION) - .addAllPartitions(partitions) - .setLastModificationTime(System.currentTimeMillis()) - .setMaxLocalTxnId(sequenceId.get()); + public CompletableFuture addProducedPartitionToTxn(TxnID txnID, List partitions) { + CompletableFuture promise = new CompletableFuture<>(); + internalPinnedExecutor.execute(() -> { + if (!checkIfReady()) { + promise + .completeExceptionally(new CoordinatorException.TransactionMetadataStoreStateException(tcID, + State.Ready, getState(), "add produced partition")); + return; + } + getTxnPositionPair(txnID).thenCompose(txnMetaListPair -> { + TransactionMetadataEntry transactionMetadataEntry = new TransactionMetadataEntry() + .setTxnidMostBits(txnID.getMostSigBits()) + .setTxnidLeastBits(txnID.getLeastSigBits()) + .setMetadataOp(TransactionMetadataOp.ADD_PARTITION) + .addAllPartitions(partitions) + .setLastModificationTime(System.currentTimeMillis()) + .setMaxLocalTxnId(sequenceIdGenerator.getCurrentSequenceId()); - return transactionLog.append(transactionMetadataEntry) - .thenCompose(position -> { - appendLogCount.increment(); - try { - 
synchronized (txnMetaListPair.getLeft()) { - txnMetaListPair.getLeft().addProducedPartitions(partitions); - txnMetaMap.get(txnID.getLeastSigBits()).getRight().add(position); + return transactionLog.append(transactionMetadataEntry) + .thenAccept(position -> { + appendLogCount.increment(); + try { + synchronized (txnMetaListPair.getLeft()) { + txnMetaListPair.getLeft().addProducedPartitions(partitions); + txnMetaMap.get(txnID.getLeastSigBits()).getRight().add(position); + } + promise.complete(null); + } catch (InvalidTxnStatusException e) { + transactionLog.deletePosition(Collections.singletonList(position)); + log.error("TxnID : " + txnMetaListPair.getLeft().id().toString() + + " add produced partition error with TxnStatus : " + + txnMetaListPair.getLeft().status().name(), e); + promise.completeExceptionally(e); } - return CompletableFuture.completedFuture(null); - } catch (InvalidTxnStatusException e) { - transactionLog.deletePosition(Collections.singletonList(position)); - log.error("TxnID : " + txnMetaListPair.getLeft().id().toString() - + " add produced partition error with TxnStatus : " - + txnMetaListPair.getLeft().status().name(), e); - return FutureUtil.failedFuture(e); - } - }); + }); + }).exceptionally(ex -> { + promise.completeExceptionally(ex); + return null; + }); }); + return promise; } @Override - public synchronized CompletableFuture addAckedPartitionToTxn(TxnID txnID, + public CompletableFuture addAckedPartitionToTxn(TxnID txnID, List txnSubscriptions) { - if (!checkIfReady()) { - return FutureUtil.failedFuture( - new CoordinatorException.TransactionMetadataStoreStateException(tcID, - State.Ready, getState(), "add acked partition")); - } - return getTxnPositionPair(txnID).thenCompose(txnMetaListPair -> { - TransactionMetadataEntry transactionMetadataEntry = new TransactionMetadataEntry() - .setTxnidMostBits(txnID.getMostSigBits()) - .setTxnidLeastBits(txnID.getLeastSigBits()) - .setMetadataOp(TransactionMetadataOp.ADD_SUBSCRIPTION) - 
.addAllSubscriptions(txnSubscriptionToSubscription(txnSubscriptions)) - .setLastModificationTime(System.currentTimeMillis()) - .setMaxLocalTxnId(sequenceId.get()); + CompletableFuture promise = new CompletableFuture<>(); + internalPinnedExecutor.execute(() -> { + if (!checkIfReady()) { + promise.completeExceptionally(new CoordinatorException + .TransactionMetadataStoreStateException(tcID, State.Ready, getState(), "add acked partition")); + return; + } + getTxnPositionPair(txnID).thenCompose(txnMetaListPair -> { + TransactionMetadataEntry transactionMetadataEntry = new TransactionMetadataEntry() + .setTxnidMostBits(txnID.getMostSigBits()) + .setTxnidLeastBits(txnID.getLeastSigBits()) + .setMetadataOp(TransactionMetadataOp.ADD_SUBSCRIPTION) + .addAllSubscriptions(txnSubscriptionToSubscription(txnSubscriptions)) + .setLastModificationTime(System.currentTimeMillis()) + .setMaxLocalTxnId(sequenceIdGenerator.getCurrentSequenceId()); - return transactionLog.append(transactionMetadataEntry) - .thenCompose(position -> { - appendLogCount.increment(); - try { - synchronized (txnMetaListPair.getLeft()) { - txnMetaListPair.getLeft().addAckedPartitions(txnSubscriptions); - txnMetaMap.get(txnID.getLeastSigBits()).getRight().add(position); + return transactionLog.append(transactionMetadataEntry) + .thenAccept(position -> { + appendLogCount.increment(); + try { + synchronized (txnMetaListPair.getLeft()) { + txnMetaListPair.getLeft().addAckedPartitions(txnSubscriptions); + txnMetaMap.get(txnID.getLeastSigBits()).getRight().add(position); + } + promise.complete(null); + } catch (InvalidTxnStatusException e) { + transactionLog.deletePosition(Collections.singletonList(position)); + log.error("TxnID : " + txnMetaListPair.getLeft().id().toString() + + " add acked subscription error with TxnStatus : " + + txnMetaListPair.getLeft().status().name(), e); + promise.completeExceptionally(e); } - return CompletableFuture.completedFuture(null); - } catch (InvalidTxnStatusException e) { - 
transactionLog.deletePosition(Collections.singletonList(position)); - log.error("TxnID : " + txnMetaListPair.getLeft().id().toString() - + " add acked subscription error with TxnStatus : " - + txnMetaListPair.getLeft().status().name(), e); - return FutureUtil.failedFuture(e); - } - }); + }); + }).exceptionally(ex -> { + promise.completeExceptionally(ex); + return null; + }); }); + return promise; } @Override - public synchronized CompletableFuture updateTxnStatus(TxnID txnID, TxnStatus newStatus, + public CompletableFuture updateTxnStatus(TxnID txnID, TxnStatus newStatus, TxnStatus expectedStatus, boolean isTimeout) { - if (!checkIfReady()) { - return FutureUtil.failedFuture( - new CoordinatorException.TransactionMetadataStoreStateException(tcID, - State.Ready, getState(), "update transaction status")); - } - return getTxnPositionPair(txnID).thenCompose(txnMetaListPair -> { - if (txnMetaListPair.getLeft().status() == newStatus) { - return CompletableFuture.completedFuture(null); + CompletableFuture promise = new CompletableFuture<>(); + internalPinnedExecutor.execute(() -> { + if (!checkIfReady()) { + promise.completeExceptionally(new CoordinatorException + .TransactionMetadataStoreStateException(tcID, + State.Ready, getState(), "update transaction status")); + return; } - TransactionMetadataEntry transactionMetadataEntry = new TransactionMetadataEntry() - .setTxnidMostBits(txnID.getMostSigBits()) - .setTxnidLeastBits(txnID.getLeastSigBits()) - .setExpectedStatus(expectedStatus) - .setMetadataOp(TransactionMetadataOp.UPDATE) - .setLastModificationTime(System.currentTimeMillis()) - .setNewStatus(newStatus) - .setMaxLocalTxnId(sequenceId.get()); + getTxnPositionPair(txnID).thenCompose(txnMetaListPair -> { + if (txnMetaListPair.getLeft().status() == newStatus) { + promise.complete(null); + return promise; + } + TransactionMetadataEntry transactionMetadataEntry = new TransactionMetadataEntry() + .setTxnidMostBits(txnID.getMostSigBits()) + 
.setTxnidLeastBits(txnID.getLeastSigBits()) + .setExpectedStatus(expectedStatus) + .setMetadataOp(TransactionMetadataOp.UPDATE) + .setLastModificationTime(System.currentTimeMillis()) + .setNewStatus(newStatus) + .setMaxLocalTxnId(sequenceIdGenerator.getCurrentSequenceId()); - return transactionLog.append(transactionMetadataEntry).thenCompose(position -> { - appendLogCount.increment(); - try { - synchronized (txnMetaListPair.getLeft()) { - txnMetaListPair.getLeft().updateTxnStatus(newStatus, expectedStatus); - txnMetaListPair.getRight().add(position); - } - if (newStatus == TxnStatus.ABORTING && isTimeout) { - this.transactionTimeoutCount.increment(); - } - if (newStatus == TxnStatus.COMMITTED || newStatus == TxnStatus.ABORTED) { - return transactionLog.deletePosition(txnMetaListPair.getRight()).thenCompose(v -> { - this.transactionMetadataStoreStats - .addTransactionExecutionLatencySample(System.currentTimeMillis() - - txnMetaListPair.getLeft().getOpenTimestamp()); - if (newStatus == TxnStatus.COMMITTED) { - committedTransactionCount.increment(); - } else { - abortedTransactionCount.increment(); + return transactionLog.append(transactionMetadataEntry) + .thenAccept(position -> { + appendLogCount.increment(); + try { + synchronized (txnMetaListPair.getLeft()) { + txnMetaListPair.getLeft().updateTxnStatus(newStatus, expectedStatus); + txnMetaListPair.getRight().add(position); + } + if (newStatus == TxnStatus.ABORTING && isTimeout) { + this.transactionTimeoutCount.increment(); + } + if (newStatus == TxnStatus.COMMITTED || newStatus == TxnStatus.ABORTED) { + this.transactionMetadataStoreStats + .addTransactionExecutionLatencySample(System.currentTimeMillis() + - txnMetaListPair.getLeft().getOpenTimestamp()); + if (newStatus == TxnStatus.COMMITTED) { + committedTransactionCount.increment(); + } else { + abortedTransactionCount.increment(); + } + txnMetaMap.remove(txnID.getLeastSigBits()); + transactionLog.deletePosition(txnMetaListPair.getRight()).exceptionally(ex -> { 
+ log.warn("Failed to delete transaction log position " + + "at end transaction [{}]", txnID); + return null; + }); + } + promise.complete(null); + } catch (InvalidTxnStatusException e) { + transactionLog.deletePosition(Collections.singletonList(position)); + log.error("TxnID : " + txnMetaListPair.getLeft().id().toString() + + " add update txn status error with TxnStatus : " + + txnMetaListPair.getLeft().status().name(), e); + promise.completeExceptionally(e); } - txnMetaMap.remove(txnID.getLeastSigBits()); - return CompletableFuture.completedFuture(null); }); - } - return CompletableFuture.completedFuture(null); - } catch (InvalidTxnStatusException e) { - transactionLog.deletePosition(Collections.singletonList(position)); - log.error("TxnID : " + txnMetaListPair.getLeft().id().toString() - + " add update txn status error with TxnStatus : " - + txnMetaListPair.getLeft().status().name(), e); - return FutureUtil.failedFuture(e); - } + }).exceptionally(ex -> { + promise.completeExceptionally(ex); + return null; }); }); + return promise; } @Override @@ -380,7 +435,7 @@ public TransactionCoordinatorStats getCoordinatorStats() { TransactionCoordinatorStats transactionCoordinatorstats = new TransactionCoordinatorStats(); transactionCoordinatorstats.setLowWaterMark(getLowWaterMark()); transactionCoordinatorstats.setState(getState().name()); - transactionCoordinatorstats.setLeastSigBits(sequenceId.get()); + transactionCoordinatorstats.setLeastSigBits(sequenceIdGenerator.getCurrentSequenceId()); return transactionCoordinatorstats; } @@ -397,15 +452,24 @@ private CompletableFuture>> getTxnPositionPair(TxnI @Override public CompletableFuture closeAsync() { - return transactionLog.closeAsync().thenCompose(v -> { - txnMetaMap.clear(); - this.timeoutTracker.close(); - if (!this.changeToCloseState()) { - return FutureUtil.failedFuture( - new IllegalStateException("Managed ledger transaction metadata store state to close error!")); - } + if (changeToClosingState()) { + // Disable 
new tasks from being submitted + internalPinnedExecutor.shutdown(); + return transactionLog.closeAsync().thenCompose(v -> { + txnMetaMap.clear(); + this.timeoutTracker.close(); + if (!this.changeToCloseState()) { + return FutureUtil.failedFuture( + new IllegalStateException( + "Managed ledger transaction metadata store state to close error!")); + } + // Shutdown the ExecutorService + MoreExecutors.shutdownAndAwaitTermination(internalPinnedExecutor, Duration.ofSeconds(5L)); + return CompletableFuture.completedFuture(null); + }); + } else { return CompletableFuture.completedFuture(null); - }); + } } @Override @@ -459,4 +523,4 @@ public static List subscriptionToTxnSubscription( public ManagedLedger getManagedLedger() { return this.transactionLog.getManagedLedger(); } -} \ No newline at end of file +} diff --git a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionMetadataStoreProvider.java b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionMetadataStoreProvider.java index 36b19585de931..22a58ebcc9766 100644 --- a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionMetadataStoreProvider.java +++ b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionMetadataStoreProvider.java @@ -45,10 +45,14 @@ public CompletableFuture openStore(TransactionCoordina ManagedLedgerConfig managedLedgerConfig, TransactionTimeoutTracker timeoutTracker, TransactionRecoverTracker recoverTracker) { + MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); + managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); MLTransactionLogImpl txnLog = new MLTransactionLogImpl(transactionCoordinatorId, managedLedgerFactory, managedLedgerConfig); - return txnLog.initialize().thenApply(__ -> - new 
MLTransactionMetadataStore(transactionCoordinatorId, txnLog, timeoutTracker, recoverTracker)); + // MLTransactionLogInterceptor will init sequenceId and update the sequenceId to managedLedger properties. + return txnLog.initialize().thenCompose(__ -> + new MLTransactionMetadataStore(transactionCoordinatorId, txnLog, timeoutTracker, + mlTransactionSequenceIdGenerator).init(recoverTracker)); } } \ No newline at end of file diff --git a/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionSequenceIdGenerator.java b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionSequenceIdGenerator.java new file mode 100644 index 0000000000000..c68997b1c0524 --- /dev/null +++ b/pulsar-transaction/coordinator/src/main/java/org/apache/pulsar/transaction/coordinator/impl/MLTransactionSequenceIdGenerator.java @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.transaction.coordinator.impl; + +import io.netty.buffer.ByteBuf; +import org.apache.bookkeeper.client.LedgerHandle; +import org.apache.bookkeeper.client.api.LedgerEntry; +import org.apache.bookkeeper.mledger.impl.OpAddEntry; +import org.apache.bookkeeper.mledger.intercept.ManagedLedgerInterceptor; +import org.apache.pulsar.transaction.coordinator.proto.TransactionMetadataEntry; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Store max sequenceID in ManagedLedger properties, in order to recover transaction log. + */ +public class MLTransactionSequenceIdGenerator implements ManagedLedgerInterceptor { + + private static final Logger log = LoggerFactory.getLogger(MLTransactionSequenceIdGenerator.class); + private static final long TC_ID_NOT_USED = -1L; + public static final String MAX_LOCAL_TXN_ID = "max_local_txn_id"; + private final AtomicLong sequenceId = new AtomicLong(TC_ID_NOT_USED); + + @Override + public OpAddEntry beforeAddEntry(OpAddEntry op, int numberOfMessages) { + return op; + } + + // When all of ledger have been deleted, we will generate sequenceId from managedLedger properties + @Override + public void onManagedLedgerPropertiesInitialize(Map propertiesMap) { + if (propertiesMap == null || propertiesMap.size() == 0) { + return; + } + + if (propertiesMap.containsKey(MAX_LOCAL_TXN_ID)) { + sequenceId.set(Long.parseLong(propertiesMap.get(MAX_LOCAL_TXN_ID))); + } + } + + // When we don't roll over ledger, we can init sequenceId from the getLastAddConfirmed transaction metadata entry + @Override + public CompletableFuture onManagedLedgerLastLedgerInitialize(String name, LedgerHandle lh) { + CompletableFuture promise = new CompletableFuture<>(); + if (lh.getLastAddConfirmed() >= 0) { + lh.readAsync(lh.getLastAddConfirmed(), lh.getLastAddConfirmed()).whenComplete((entries, ex) -> { + if (ex 
!= null) { + log.error("[{}] Read last entry error.", name, ex); + promise.completeExceptionally(ex); + } else { + if (entries != null) { + try { + LedgerEntry ledgerEntry = entries.getEntry(lh.getLastAddConfirmed()); + if (ledgerEntry != null) { + TransactionMetadataEntry lastConfirmEntry = new TransactionMetadataEntry(); + ByteBuf buffer = ledgerEntry.getEntryBuffer(); + lastConfirmEntry.parseFrom(buffer, buffer.readableBytes()); + this.sequenceId.set(lastConfirmEntry.getMaxLocalTxnId()); + } + entries.close(); + promise.complete(null); + } catch (Exception e) { + entries.close(); + log.error("[{}] Failed to recover the tc sequenceId from the last add confirmed entry.", + name, e); + promise.completeExceptionally(e); + } + } else { + promise.complete(null); + } + } + }); + } else { + promise.complete(null); + } + return promise; + } + + // roll over ledger will update sequenceId to managedLedger properties + @Override + public void onUpdateManagedLedgerInfo(Map propertiesMap) { + propertiesMap.put(MAX_LOCAL_TXN_ID, sequenceId.get() + ""); + } + + long generateSequenceId() { + return sequenceId.incrementAndGet(); + } + + long getCurrentSequenceId() { + return sequenceId.get(); + } +} diff --git a/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/MLTransactionMetadataStoreTest.java b/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/MLTransactionMetadataStoreTest.java index e9b3e0c08d122..aafe54e60694e 100644 --- a/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/MLTransactionMetadataStoreTest.java +++ b/pulsar-transaction/coordinator/src/test/java/org/apache/pulsar/transaction/coordinator/MLTransactionMetadataStoreTest.java @@ -31,6 +31,7 @@ import org.apache.pulsar.transaction.coordinator.exceptions.CoordinatorException; import org.apache.pulsar.transaction.coordinator.exceptions.CoordinatorException.TransactionNotFoundException; import 
org.apache.pulsar.transaction.coordinator.impl.MLTransactionLogImpl; +import org.apache.pulsar.transaction.coordinator.impl.MLTransactionSequenceIdGenerator; import org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStore; import org.apache.pulsar.transaction.coordinator.proto.TxnStatus; import org.apache.pulsar.transaction.coordinator.test.MockedBookKeeperTestCase; @@ -65,12 +66,17 @@ public void testTransactionOperation() throws Exception { @Cleanup("shutdown") ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(metadataStore, bkc, factoryConf); TransactionCoordinatorID transactionCoordinatorID = new TransactionCoordinatorID(1); + ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); + MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); + managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, - new ManagedLedgerConfig()); + managedLedgerConfig); mlTransactionLog.initialize().join(); MLTransactionMetadataStore transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, - new TransactionTimeoutTrackerImpl(), new TransactionRecoverTrackerImpl()); + new TransactionTimeoutTrackerImpl(), + mlTransactionSequenceIdGenerator); + transactionMetadataStore.init(new TransactionRecoverTrackerImpl()).get(); int checkReplayRetryCount = 0; while (true) { checkReplayRetryCount++; @@ -122,13 +128,13 @@ public void testTransactionOperation() throws Exception { } } - @DataProvider(name = "isUseManagedLedger") + @DataProvider(name = "isUseManagedLedgerProperties") public Object[][] versions() { return new Object[][] { { true }, { false } }; } - @Test(dataProvider = "isUseManagedLedger") - public void testRecoverSequenceId(boolean isUseManagedLedger) throws Exception { + @Test(dataProvider = "isUseManagedLedgerProperties") + 
public void testRecoverSequenceId(boolean isUseManagedLedgerProperties) throws Exception { ManagedLedgerFactoryConfig factoryConf = new ManagedLedgerFactoryConfig(); factoryConf.setMaxCacheSize(0); @@ -136,18 +142,21 @@ public void testRecoverSequenceId(boolean isUseManagedLedger) throws Exception { ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(metadataStore, bkc, factoryConf); TransactionCoordinatorID transactionCoordinatorID = new TransactionCoordinatorID(1); ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); + MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); + managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); managedLedgerConfig.setMaxEntriesPerLedger(3); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, managedLedgerConfig); mlTransactionLog.initialize().join(); MLTransactionMetadataStore transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, - new TransactionTimeoutTrackerImpl(), new TransactionRecoverTrackerImpl()); + new TransactionTimeoutTrackerImpl(), mlTransactionSequenceIdGenerator); + transactionMetadataStore.init(new TransactionRecoverTrackerImpl()).get(); Awaitility.await().until(transactionMetadataStore::checkIfReady); TxnID txnID = transactionMetadataStore.newTransaction(20000).get(); transactionMetadataStore.updateTxnStatus(txnID, TxnStatus.COMMITTING, TxnStatus.OPEN, false).get(); - if (isUseManagedLedger) { + if (isUseManagedLedgerProperties) { transactionMetadataStore.updateTxnStatus(txnID, TxnStatus.COMMITTED, TxnStatus.COMMITTING, false).get(); } assertEquals(txnID.getLeastSigBits(), 0); @@ -155,16 +164,23 @@ public void testRecoverSequenceId(boolean isUseManagedLedger) throws Exception { field.setAccessible(true); ManagedLedgerImpl managedLedger = (ManagedLedgerImpl) field.get(mlTransactionLog); Position position = 
managedLedger.getLastConfirmedEntry(); - - if (isUseManagedLedger) { + if (isUseManagedLedgerProperties) { + Field stateUpdater = ManagedLedgerImpl.class.getDeclaredField("state"); + stateUpdater.setAccessible(true); + stateUpdater.set(managedLedger, ManagedLedgerImpl.State.LedgerOpened); + managedLedger.rollCurrentLedgerIfFull(); Awaitility.await().until(() -> { - managedLedger.rollCurrentLedgerIfFull(); return !managedLedger.ledgerExists(position.getLedgerId()); }); } + mlTransactionLog.closeAsync().get(); + mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, + managedLedgerConfig); + mlTransactionLog.initialize().join(); transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, - new TransactionTimeoutTrackerImpl(), new TransactionRecoverTrackerImpl()); + new TransactionTimeoutTrackerImpl(), mlTransactionSequenceIdGenerator); + transactionMetadataStore.init(new TransactionRecoverTrackerImpl()).get(); Awaitility.await().until(transactionMetadataStore::checkIfReady); txnID = transactionMetadataStore.newTransaction(100000).get(); @@ -181,12 +197,16 @@ public void testInitTransactionReader() throws Exception { TransactionCoordinatorID transactionCoordinatorID = new TransactionCoordinatorID(1); ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); managedLedgerConfig.setMaxEntriesPerLedger(2); + MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); + managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, managedLedgerConfig); mlTransactionLog.initialize().join(); + MLTransactionMetadataStore transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, - new TransactionTimeoutTrackerImpl(), new TransactionRecoverTrackerImpl()); + new 
TransactionTimeoutTrackerImpl(), mlTransactionSequenceIdGenerator); + transactionMetadataStore.init(new TransactionRecoverTrackerImpl()).get(); int checkReplayRetryCount = 0; while (true) { if (checkReplayRetryCount > 3) { @@ -224,11 +244,13 @@ public void testInitTransactionReader() throws Exception { transactionMetadataStore.closeAsync(); MLTransactionLogImpl txnLog2 = new MLTransactionLogImpl(transactionCoordinatorID, factory, - new ManagedLedgerConfig()); + managedLedgerConfig); txnLog2.initialize().join(); + MLTransactionMetadataStore transactionMetadataStoreTest = new MLTransactionMetadataStore(transactionCoordinatorID, - txnLog2, new TransactionTimeoutTrackerImpl(), new TransactionRecoverTrackerImpl()); + txnLog2, new TransactionTimeoutTrackerImpl(), mlTransactionSequenceIdGenerator); + transactionMetadataStoreTest.init(new TransactionRecoverTrackerImpl()).get(); while (true) { if (checkReplayRetryCount > 6) { @@ -288,12 +310,16 @@ public void testDeleteLog() throws Exception { @Cleanup("shutdown") ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(metadataStore, bkc, factoryConf); TransactionCoordinatorID transactionCoordinatorID = new TransactionCoordinatorID(1); + ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); + MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); + managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, - new ManagedLedgerConfig()); + managedLedgerConfig); mlTransactionLog.initialize().join(); MLTransactionMetadataStore transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, - new TransactionTimeoutTrackerImpl(), new TransactionRecoverTrackerImpl()); + new TransactionTimeoutTrackerImpl(), mlTransactionSequenceIdGenerator); + transactionMetadataStore.init(new 
TransactionRecoverTrackerImpl()).get(); int checkReplayRetryCount = 0; while (true) { if (checkReplayRetryCount > 3) { @@ -351,13 +377,16 @@ public void testRecoverWhenDeleteFromCursor() throws Exception { @Cleanup("shutdown") ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(metadataStore, bkc, factoryConf); TransactionCoordinatorID transactionCoordinatorID = new TransactionCoordinatorID(1); + ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); + MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); + managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, - new ManagedLedgerConfig()); + managedLedgerConfig); mlTransactionLog.initialize().join(); MLTransactionMetadataStore transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, - new TransactionTimeoutTrackerImpl(), new TransactionRecoverTrackerImpl()); - + new TransactionTimeoutTrackerImpl(), mlTransactionSequenceIdGenerator); + transactionMetadataStore.init(new TransactionRecoverTrackerImpl()).get(); Awaitility.await().until(transactionMetadataStore::checkIfReady); @@ -370,11 +399,12 @@ public void testRecoverWhenDeleteFromCursor() throws Exception { transactionMetadataStore.updateTxnStatus(txnID2, TxnStatus.ABORTED, TxnStatus.ABORTING, false).get(); mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, - new ManagedLedgerConfig()); + managedLedgerConfig); mlTransactionLog.initialize().join(); transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, - new TransactionTimeoutTrackerImpl(), new TransactionRecoverTrackerImpl()); + new TransactionTimeoutTrackerImpl(), mlTransactionSequenceIdGenerator); + transactionMetadataStore.init(new TransactionRecoverTrackerImpl()).get(); 
Awaitility.await().until(transactionMetadataStore::checkIfReady); } @@ -387,12 +417,16 @@ public void testManageLedgerWriteFailState() throws Exception { @Cleanup("shutdown") ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(metadataStore, bkc, factoryConf); TransactionCoordinatorID transactionCoordinatorID = new TransactionCoordinatorID(1); + ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig(); + MLTransactionSequenceIdGenerator mlTransactionSequenceIdGenerator = new MLTransactionSequenceIdGenerator(); + managedLedgerConfig.setManagedLedgerInterceptor(mlTransactionSequenceIdGenerator); MLTransactionLogImpl mlTransactionLog = new MLTransactionLogImpl(transactionCoordinatorID, factory, - new ManagedLedgerConfig()); + managedLedgerConfig); mlTransactionLog.initialize().join(); MLTransactionMetadataStore transactionMetadataStore = new MLTransactionMetadataStore(transactionCoordinatorID, mlTransactionLog, - new TransactionTimeoutTrackerImpl(), new TransactionRecoverTrackerImpl()); + new TransactionTimeoutTrackerImpl(), mlTransactionSequenceIdGenerator); + transactionMetadataStore.init(new TransactionRecoverTrackerImpl()).get(); Awaitility.await().until(transactionMetadataStore::checkIfReady); transactionMetadataStore.newTransaction(5000).get(); diff --git a/pulsar-transaction/pom.xml b/pulsar-transaction/pom.xml index 053a494039d4b..2760271368785 100644 --- a/pulsar-transaction/pom.xml +++ b/pulsar-transaction/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 pulsar-transaction-parent diff --git a/pulsar-websocket/pom.xml b/pulsar-websocket/pom.xml index 35377d7ed3c6e..64a46af2daa8d 100644 --- a/pulsar-websocket/pom.xml +++ b/pulsar-websocket/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ConsumerHandler.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ConsumerHandler.java index a1c76d26fd2c4..6dd44cb9b7551 100644 --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ConsumerHandler.java +++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ConsumerHandler.java @@ -24,9 +24,14 @@ import com.google.common.base.Enums; import com.google.common.base.Splitter; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; import java.io.IOException; import java.util.Base64; import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLongFieldUpdater; @@ -45,6 +50,7 @@ import org.apache.pulsar.client.api.PulsarClientException.AlreadyClosedException; import org.apache.pulsar.client.api.SubscriptionMode; import org.apache.pulsar.client.api.SubscriptionType; +import org.apache.pulsar.client.impl.BatchMessageIdImpl; import org.apache.pulsar.client.impl.ConsumerBuilderImpl; import org.apache.pulsar.common.util.Codec; import org.apache.pulsar.common.util.DateFormatter; @@ -87,6 +93,12 @@ public class ConsumerHandler extends AbstractWebSocketHandler { private static final AtomicLongFieldUpdater MSG_DELIVERED_COUNTER_UPDATER = AtomicLongFieldUpdater.newUpdater(ConsumerHandler.class, "msgDeliveredCounter"); + // Make sure use the same BatchMessageIdImpl to acknowledge the batch message, otherwise the BatchMessageAcker + // of the BatchMessageIdImpl will not complete. 
+ private Cache messageIdCache = CacheBuilder.newBuilder() + .expireAfterWrite(1, TimeUnit.HOURS) + .build(); + public ConsumerHandler(WebSocketService service, HttpServletRequest request, ServletUpgradeResponse response) { super(service, request, response); @@ -95,7 +107,7 @@ public ConsumerHandler(WebSocketService service, HttpServletRequest request, Ser this.numMsgsDelivered = new LongAdder(); this.numBytesDelivered = new LongAdder(); this.numMsgsAcked = new LongAdder(); - this.pullMode = Boolean.valueOf(queryParams.get("pullMode")); + this.pullMode = Boolean.parseBoolean(queryParams.get("pullMode")); try { // checkAuth() and getConsumerConfiguration() should be called after assigning a value to this.subscription @@ -148,6 +160,7 @@ private void receiveMessage() { dm.properties = msg.getProperties(); dm.publishTime = DateFormatter.format(msg.getPublishTime()); dm.redeliveryCount = msg.getRedeliveryCount(); + dm.encryptionContext = msg.getEncryptionCtx().orElse(null); if (msg.getEventTime() != 0) { dm.eventTime = DateFormatter.format(msg.getEventTime()); } @@ -156,6 +169,8 @@ private void receiveMessage() { } final long msgSize = msg.getData().length; + messageIdCache.put(dm.messageId, msg.getMessageId()); + try { getSession().getRemote() .sendString(ObjectMapperFactory.getThreadLocal().writeValueAsString(dm), new WriteCallback() { @@ -232,6 +247,10 @@ public void onWebSocketText(String message) { // Check and notify consumer if reached end of topic. 
private void handleEndOfTopic() { + if (log.isDebugEnabled()) { + log.debug("[{}/{}] Received check reach the end of topic request from {} ", consumer.getTopic(), + subscription, getRemote().getInetSocketAddress().toString()); + } try { String msg = ObjectMapperFactory.getThreadLocal().writeValueAsString( new EndOfTopicResponse(consumer.hasReachedEndOfTopic())); @@ -259,6 +278,10 @@ public void writeSuccess() { } private void handleUnsubscribe(ConsumerCommand command) throws PulsarClientException { + if (log.isDebugEnabled()) { + log.debug("[{}/{}] Received unsubscribe request from {} ", consumer.getTopic(), + subscription, getRemote().getInetSocketAddress().toString()); + } consumer.unsubscribe(); } @@ -276,18 +299,43 @@ private void handleAck(ConsumerCommand command) throws IOException { // We should have received an ack MessageId msgId = MessageId.fromByteArrayWithTopic(Base64.getDecoder().decode(command.messageId), topic.toString()); - consumer.acknowledgeAsync(msgId).thenAccept(consumer -> numMsgsAcked.increment()); + if (log.isDebugEnabled()) { + log.debug("[{}/{}] Received ack request of message {} from {} ", consumer.getTopic(), + subscription, msgId, getRemote().getInetSocketAddress().toString()); + } + + MessageId originalMsgId = messageIdCache.asMap().remove(command.messageId); + if (originalMsgId != null) { + consumer.acknowledgeAsync(originalMsgId).thenAccept(consumer -> numMsgsAcked.increment()); + } else { + consumer.acknowledgeAsync(msgId).thenAccept(consumer -> numMsgsAcked.increment()); + } + checkResumeReceive(); } private void handleNack(ConsumerCommand command) throws IOException { MessageId msgId = MessageId.fromByteArrayWithTopic(Base64.getDecoder().decode(command.messageId), topic.toString()); - consumer.negativeAcknowledge(msgId); + if (log.isDebugEnabled()) { + log.debug("[{}/{}] Received negative ack request of message {} from {} ", consumer.getTopic(), + subscription, msgId, getRemote().getInetSocketAddress().toString()); + } + + 
MessageId originalMsgId = messageIdCache.asMap().remove(command.messageId); + if (originalMsgId != null) { + consumer.negativeAcknowledge(originalMsgId); + } else { + consumer.negativeAcknowledge(msgId); + } checkResumeReceive(); } private void handlePermit(ConsumerCommand command) throws IOException { + if (log.isDebugEnabled()) { + log.debug("[{}/{}] Received {} permits request from {} ", consumer.getTopic(), + subscription, command.permitMessages, getRemote().getInetSocketAddress().toString()); + } if (command.permitMessages == null) { throw new IOException("Missing required permitMessages field for 'permit' command"); } diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ProducerHandler.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ProducerHandler.java index 9552d42462cb6..9b6593d0274fe 100644 --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ProducerHandler.java +++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ProducerHandler.java @@ -131,6 +131,10 @@ public void close() throws IOException { @Override public void onWebSocketText(String message) { + if (log.isDebugEnabled()) { + log.debug("[{}] Received new message from producer {} ", producer.getTopic(), + getRemote().getInetSocketAddress().toString()); + } ProducerMessage sendRequest; byte[] rawPayload = null; String requestContext = null; @@ -188,6 +192,10 @@ public void onWebSocketText(String message) { final long now = System.nanoTime(); builder.sendAsync().thenAccept(msgId -> { + if (log.isDebugEnabled()) { + log.debug("[{}] Success fully write the message to broker with returned message ID {} from producer {}", + producer.getTopic(), msgId, getRemote().getInetSocketAddress().toString()); + } updateSentMsgStats(msgSize, TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - now)); if (isConnected()) { String messageId = Base64.getEncoder().encodeToString(msgId.toByteArray()); diff --git 
a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ReaderHandler.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ReaderHandler.java index ef0279dfaeea3..56b419f51d880 100644 --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ReaderHandler.java +++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/ReaderHandler.java @@ -36,6 +36,7 @@ import org.apache.pulsar.client.api.ReaderBuilder; import org.apache.pulsar.client.api.SubscriptionType; import org.apache.pulsar.client.impl.MessageIdImpl; +import org.apache.pulsar.client.impl.MultiTopicsReaderImpl; import org.apache.pulsar.client.impl.ReaderImpl; import org.apache.pulsar.common.util.DateFormatter; import org.apache.pulsar.common.util.ObjectMapperFactory; @@ -103,8 +104,11 @@ public ReaderHandler(WebSocketService service, HttpServletRequest request, Servl } this.reader = builder.create(); - - this.subscription = ((ReaderImpl) this.reader).getConsumer().getSubscription(); + Consumer consumer = getConsumer(); + if (consumer == null) { + throw new IllegalArgumentException(String.format("Illegal Reader Type %s", reader.getClass())); + } + this.subscription = consumer.getSubscription(); if (!this.service.addReader(this)) { log.warn("[{}:{}] Failed to add reader handler for topic {}", request.getRemoteAddr(), request.getRemotePort(), topic); @@ -265,7 +269,13 @@ public void close() throws IOException { } public Consumer getConsumer() { - return reader != null ? 
((ReaderImpl) reader).getConsumer() : null; + if (reader instanceof MultiTopicsReaderImpl) { + return ((MultiTopicsReaderImpl) reader).getMultiTopicsConsumer(); + } else if (reader instanceof ReaderImpl) { + return ((ReaderImpl) reader).getConsumer(); + } else { + return null; + } } public String getSubscription() { diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/WebSocketService.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/WebSocketService.java index 0753dd282f6da..7a0f19bad1086 100644 --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/WebSocketService.java +++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/WebSocketService.java @@ -37,6 +37,7 @@ import org.apache.pulsar.client.api.ClientBuilder; import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.internal.PropertiesUtils; import org.apache.pulsar.common.configuration.PulsarConfigurationLoader; import org.apache.pulsar.common.policies.data.ClusterData; import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap; @@ -81,9 +82,17 @@ public WebSocketService(WebSocketProxyConfiguration config) { public WebSocketService(ClusterData localCluster, ServiceConfiguration config) { this.config = config; this.localCluster = localCluster; - this.topicProducerMap = new ConcurrentOpenHashMap<>(); - this.topicConsumerMap = new ConcurrentOpenHashMap<>(); - this.topicReaderMap = new ConcurrentOpenHashMap<>(); + this.topicProducerMap = + ConcurrentOpenHashMap.>newBuilder() + .build(); + this.topicConsumerMap = + ConcurrentOpenHashMap.>newBuilder() + .build(); + this.topicReaderMap = + ConcurrentOpenHashMap.>newBuilder() + .build(); this.proxyStats = new ProxyStats(this); } @@ -173,6 +182,11 @@ private PulsarClient createClientInstance(ClusterData clusterData) throws IOExce .ioThreads(config.getWebSocketNumIoThreads()) // 
.connectionsPerBroker(config.getWebSocketConnectionsPerBroker()); + // Apply all arbitrary configuration. This must be called before setting any fields annotated as + // @Secret on the ClientConfigurationData object because of the way they are serialized. + // See https://github.com/apache/pulsar/issues/8509 for more information. + clientBuilder.loadConf(PropertiesUtils.filterAndMapProperties(config.getProperties(), "brokerClient_")); + if (isNotBlank(config.getBrokerClientAuthenticationPlugin()) && isNotBlank(config.getBrokerClientAuthenticationParameters())) { clientBuilder.authentication(config.getBrokerClientAuthenticationPlugin(), @@ -190,7 +204,6 @@ && isNotBlank(config.getBrokerClientAuthenticationParameters())) { } else { clientBuilder.serviceUrl(clusterData.getServiceUrl()); } - return clientBuilder.build(); } @@ -247,7 +260,8 @@ public boolean isAuthorizationEnabled() { public boolean addProducer(ProducerHandler producer) { return topicProducerMap - .computeIfAbsent(producer.getProducer().getTopic(), topic -> new ConcurrentOpenHashSet<>()) + .computeIfAbsent(producer.getProducer().getTopic(), + topic -> ConcurrentOpenHashSet.newBuilder().build()) .add(producer); } @@ -265,7 +279,8 @@ public boolean removeProducer(ProducerHandler producer) { public boolean addConsumer(ConsumerHandler consumer) { return topicConsumerMap - .computeIfAbsent(consumer.getConsumer().getTopic(), topic -> new ConcurrentOpenHashSet<>()) + .computeIfAbsent(consumer.getConsumer().getTopic(), topic -> + ConcurrentOpenHashSet.newBuilder().build()) .add(consumer); } @@ -282,7 +297,8 @@ public boolean removeConsumer(ConsumerHandler consumer) { } public boolean addReader(ReaderHandler reader) { - return topicReaderMap.computeIfAbsent(reader.getConsumer().getTopic(), topic -> new ConcurrentOpenHashSet<>()) + return topicReaderMap.computeIfAbsent(reader.getConsumer().getTopic(), topic -> + ConcurrentOpenHashSet.newBuilder().build()) .add(reader); } diff --git 
a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/data/ConsumerMessage.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/data/ConsumerMessage.java index 9660c95be65ca..9091a7eb64ee9 100644 --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/data/ConsumerMessage.java +++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/data/ConsumerMessage.java @@ -22,6 +22,7 @@ import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonInclude.Include; +import org.apache.pulsar.common.api.EncryptionContext; @JsonInclude(Include.NON_NULL) public class ConsumerMessage { @@ -32,5 +33,7 @@ public class ConsumerMessage { public int redeliveryCount; public String eventTime; + public EncryptionContext encryptionContext; + public String key; } diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/service/ProxyServer.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/service/ProxyServer.java index 3179bb452a980..85b7e2626cec4 100644 --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/service/ProxyServer.java +++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/service/ProxyServer.java @@ -36,7 +36,7 @@ import org.apache.pulsar.broker.web.JettyRequestLogFactory; import org.apache.pulsar.broker.web.WebExecutorThreadPool; import org.apache.pulsar.client.api.PulsarClientException; -import org.apache.pulsar.common.util.SecurityUtility; +import org.apache.pulsar.jetty.tls.JettySslContextFactory; import org.eclipse.jetty.server.Handler; import org.eclipse.jetty.server.Server; import org.eclipse.jetty.server.ServerConnector; @@ -76,15 +76,35 @@ public ProxyServer(WebSocketProxyConfiguration config) // TLS enabled connector if (config.getWebServicePortTls().isPresent()) { try { - SslContextFactory sslCtxFactory = SecurityUtility.createSslContextFactory( - config.isTlsAllowInsecureConnection(), - config.getTlsTrustCertsFilePath(), - 
config.getTlsCertificateFilePath(), - config.getTlsKeyFilePath(), - config.isTlsRequireTrustedClientCertOnConnect(), - true, - config.getTlsCertRefreshCheckDurationSec()); - connectorTls = new ServerConnector(server, -1, -1, sslCtxFactory); + SslContextFactory sslCtxFactory; + if (config.isTlsEnabledWithKeyStore()) { + sslCtxFactory = JettySslContextFactory.createServerSslContextWithKeystore( + config.getTlsProvider(), + config.getTlsKeyStoreType(), + config.getTlsKeyStore(), + config.getTlsKeyStorePassword(), + config.isTlsAllowInsecureConnection(), + config.getTlsTrustStoreType(), + config.getTlsTrustStore(), + config.getTlsTrustStorePassword(), + config.isTlsRequireTrustedClientCertOnConnect(), + config.getWebServiceTlsCiphers(), + config.getWebServiceTlsProtocols(), + config.getTlsCertRefreshCheckDurationSec() + ); + } else { + sslCtxFactory = JettySslContextFactory.createServerSslContext( + config.getTlsProvider(), + config.isTlsAllowInsecureConnection(), + config.getTlsTrustCertsFilePath(), + config.getTlsCertificateFilePath(), + config.getTlsKeyFilePath(), + config.isTlsRequireTrustedClientCertOnConnect(), + config.getWebServiceTlsCiphers(), + config.getWebServiceTlsProtocols(), + config.getTlsCertRefreshCheckDurationSec()); + } + connectorTls = new ServerConnector(server, sslCtxFactory); connectorTls.setPort(config.getWebServicePortTls().get()); connectors.add(connectorTls); } catch (Exception e) { diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/service/WebSocketProxyConfiguration.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/service/WebSocketProxyConfiguration.java index d37d14208fdac..deb8ab3e032b6 100644 --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/service/WebSocketProxyConfiguration.java +++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/service/WebSocketProxyConfiguration.java @@ -21,16 +21,16 @@ import java.util.Optional; import java.util.Properties; import java.util.Set; - 
+import java.util.TreeSet; +import lombok.Getter; +import lombok.Setter; +import lombok.ToString; import org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider; import org.apache.pulsar.common.configuration.FieldContext; import org.apache.pulsar.common.configuration.PulsarConfiguration; import com.google.common.collect.Sets; -import lombok.Getter; -import lombok.Setter; - @Getter @Setter public class WebSocketProxyConfiguration implements PulsarConfiguration { @@ -160,6 +160,61 @@ public class WebSocketProxyConfiguration implements PulsarConfiguration { @FieldContext(doc = "TLS cert refresh duration (in seconds). 0 means checking every new connection.") private long tlsCertRefreshCheckDurationSec = 300; + /**** --- KeyStore TLS config variables. --- ****/ + @FieldContext( + doc = "Enable TLS with KeyStore type configuration for WebSocket" + ) + private boolean tlsEnabledWithKeyStore = false; + + @FieldContext( + doc = "Specify the TLS provider for the WebSocket service: SunJSSE, Conscrypt and etc." + ) + private String tlsProvider = "Conscrypt"; + + @FieldContext( + doc = "TLS KeyStore type configuration in WebSocket: JKS, PKCS12" + ) + private String tlsKeyStoreType = "JKS"; + + @FieldContext( + doc = "TLS KeyStore path in WebSocket" + ) + private String tlsKeyStore = null; + + @FieldContext( + doc = "TLS KeyStore password for WebSocket" + ) + @ToString.Exclude + private String tlsKeyStorePassword = null; + + @FieldContext( + doc = "TLS TrustStore type configuration in WebSocket: JKS, PKCS12" + ) + private String tlsTrustStoreType = "JKS"; + + @FieldContext( + doc = "TLS TrustStore path in WebSocket" + ) + private String tlsTrustStore = null; + + @FieldContext( + doc = "TLS TrustStore password for WebSocket, null means empty password." 
+ ) + @ToString.Exclude + private String tlsTrustStorePassword = null; + + @FieldContext( + doc = "Specify the tls protocols the proxy's web service will use to negotiate during TLS Handshake.\n\n" + + "Example:- [TLSv1.3, TLSv1.2]" + ) + private Set webServiceTlsProtocols = new TreeSet<>(); + + @FieldContext( + doc = "Specify the tls cipher the proxy's web service will use to negotiate during TLS Handshake.\n\n" + + "Example:- [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]" + ) + private Set webServiceTlsCiphers = new TreeSet<>(); + @FieldContext(doc = "Key-value properties. Types are all String") private Properties properties = new Properties(); } diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/JvmMetrics.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/JvmMetrics.java index 905c49bf56205..112fc205f08cb 100644 --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/JvmMetrics.java +++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/JvmMetrics.java @@ -19,6 +19,7 @@ package org.apache.pulsar.websocket.stats; import static org.apache.pulsar.common.stats.Metrics.create; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import java.lang.management.ManagementFactory; import java.util.Map; @@ -52,7 +53,8 @@ public class JvmMetrics { private static final Logger log = LoggerFactory.getLogger(JvmMetrics.class); public JvmMetrics(WebSocketService service) { - service.getExecutor().scheduleAtFixedRate(this::updateGcStats, 0, 1, TimeUnit.MINUTES); + service.getExecutor() + .scheduleAtFixedRate(catchingAndLoggingThrowables(this::updateGcStats), 0, 1, TimeUnit.MINUTES); } public Metrics generate() { diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/ProxyStats.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/ProxyStats.java index ef7523b84fffc..8fa91130ae440 100644 --- 
a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/ProxyStats.java +++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/ProxyStats.java @@ -18,6 +18,7 @@ */ package org.apache.pulsar.websocket.stats; +import static org.apache.pulsar.common.util.Runnables.catchingAndLoggingThrowables; import static org.apache.pulsar.websocket.ProducerHandler.ENTRY_LATENCY_BUCKETS_USEC; import java.util.List; @@ -51,11 +52,14 @@ public ProxyStats(WebSocketService service) { super(); this.service = service; this.jvmMetrics = new JvmMetrics(service); - this.topicStats = new ConcurrentOpenHashMap<>(); + this.topicStats = + ConcurrentOpenHashMap.newBuilder() + .build(); this.metricsCollection = Lists.newArrayList(); this.tempMetricsCollection = Lists.newArrayList(); // schedule stat generation task every 1 minute - service.getExecutor().scheduleAtFixedRate(() -> generate(), 120, 60, TimeUnit.SECONDS); + service.getExecutor() + .scheduleAtFixedRate(catchingAndLoggingThrowables(this::generate), 120, 60, TimeUnit.SECONDS); } /** diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/StatsBuckets.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/StatsBuckets.java index 82f34d1b9de81..8625146a8659e 100644 --- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/StatsBuckets.java +++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/StatsBuckets.java @@ -61,7 +61,7 @@ public void addAll(StatsBuckets other) { for (int i = 0; i < buckets.length; i++) { buckets[i].add(other.values[i]); } - sumCounter.add(other.count); + sumCounter.add(other.sum); } diff --git a/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/AbstractWebSocketHandlerTest.java b/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/AbstractWebSocketHandlerTest.java index 9bd9907677996..782e05ea62577 100644 --- a/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/AbstractWebSocketHandlerTest.java +++ 
b/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/AbstractWebSocketHandlerTest.java @@ -369,11 +369,12 @@ public void consumerBuilderTest() throws IOException { consumerHandler.clearQueryParams(); consumerHandler.putQueryParam("receiverQueueSize", "1001"); consumerHandler.putQueryParam("deadLetterTopic", "dead-letter-topic"); + consumerHandler.putQueryParam("maxRedeliverCount", "3"); conf = consumerHandler.getConf(); // receive queue size is the minimum value of default value (1000) and user defined value(1001) assertEquals(conf.getReceiverQueueSize(), 1000); assertEquals(conf.getDeadLetterPolicy().getDeadLetterTopic(), "dead-letter-topic"); - assertEquals(conf.getDeadLetterPolicy().getMaxRedeliverCount(), 0); + assertEquals(conf.getDeadLetterPolicy().getMaxRedeliverCount(), 3); } } diff --git a/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/ReaderHandlerTest.java b/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/ReaderHandlerTest.java new file mode 100644 index 0000000000000..7dfa8b6e3146b --- /dev/null +++ b/pulsar-websocket/src/test/java/org/apache/pulsar/websocket/ReaderHandlerTest.java @@ -0,0 +1,218 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.pulsar.websocket; + +import org.apache.pulsar.client.api.Message; +import org.apache.pulsar.client.api.MessageId; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.client.api.Reader; +import org.apache.pulsar.client.api.ReaderBuilder; +import org.apache.pulsar.client.impl.ConsumerImpl; +import org.apache.pulsar.client.impl.MultiTopicsConsumerImpl; +import org.apache.pulsar.client.impl.MultiTopicsReaderImpl; +import org.apache.pulsar.client.impl.ReaderImpl; +import org.eclipse.jetty.websocket.servlet.ServletUpgradeResponse; +import org.testng.Assert; +import org.testng.annotations.Test; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class ReaderHandlerTest { + + @Test + @SuppressWarnings("unchecked") + public void testCreateReaderImp() throws IOException { + final String subName = "readerImpSubscription"; + // mock data + WebSocketService wss = mock(WebSocketService.class); + PulsarClient mockedClient = mock(PulsarClient.class); + when(wss.getPulsarClient()).thenReturn(mockedClient); + ReaderBuilder mockedReaderBuilder = mock(ReaderBuilder.class); + when(mockedClient.newReader()).thenReturn(mockedReaderBuilder); + when(mockedReaderBuilder.topic(any())).thenReturn(mockedReaderBuilder); + when(mockedReaderBuilder.startMessageId(any())).thenReturn(mockedReaderBuilder); + 
when(mockedReaderBuilder.receiverQueueSize(anyInt())).thenReturn(mockedReaderBuilder); + ReaderImpl mockedReader = mock(ReaderImpl.class); + when(mockedReaderBuilder.create()).thenReturn(mockedReader); + ConsumerImpl consumerImp = mock(ConsumerImpl.class); + when(consumerImp.getSubscription()).thenReturn(subName); + when(mockedReader.getConsumer()).thenReturn(consumerImp); + HttpServletRequest request = mock(HttpServletRequest.class); + when(request.getRequestURI()).thenReturn("/ws/v2/producer/persistent/my-property/my-ns/my-topic"); + // create reader handler + HttpServletResponse response = spy(HttpServletResponse.class); + ServletUpgradeResponse servletUpgradeResponse = new ServletUpgradeResponse(response); + ReaderHandler readerHandler = new ReaderHandler(wss, request, servletUpgradeResponse); + // verify success + Assert.assertEquals(readerHandler.getSubscription(), subName); + // Verify consumer is returned + readerHandler.getConsumer(); + } + + @Test + @SuppressWarnings("unchecked") + public void testCreateMultipleTopicReaderImp() throws IOException { + final String subName = "multipleTopicReaderImpSubscription"; + // mock data + WebSocketService wss = mock(WebSocketService.class); + PulsarClient mockedClient = mock(PulsarClient.class); + when(wss.getPulsarClient()).thenReturn(mockedClient); + ReaderBuilder mockedReaderBuilder = mock(ReaderBuilder.class); + when(mockedClient.newReader()).thenReturn(mockedReaderBuilder); + when(mockedReaderBuilder.topic(any())).thenReturn(mockedReaderBuilder); + when(mockedReaderBuilder.startMessageId(any())).thenReturn(mockedReaderBuilder); + when(mockedReaderBuilder.receiverQueueSize(anyInt())).thenReturn(mockedReaderBuilder); + MultiTopicsReaderImpl mockedReader = mock(MultiTopicsReaderImpl.class); + when(mockedReaderBuilder.create()).thenReturn(mockedReader); + MultiTopicsConsumerImpl consumerImp = mock(MultiTopicsConsumerImpl.class); + when(consumerImp.getSubscription()).thenReturn(subName); + 
when(mockedReader.getMultiTopicsConsumer()).thenReturn(consumerImp); + HttpServletRequest request = mock(HttpServletRequest.class); + when(request.getRequestURI()).thenReturn("/ws/v2/producer/persistent/my-property/my-ns/my-topic"); + // create reader handler + HttpServletResponse response = spy(HttpServletResponse.class); + ServletUpgradeResponse servletUpgradeResponse = new ServletUpgradeResponse(response); + ReaderHandler readerHandler = new ReaderHandler(wss, request, servletUpgradeResponse); + // verify success + Assert.assertEquals(readerHandler.getSubscription(), subName); + // Verify consumer is successfully returned + readerHandler.getConsumer(); + } + + @Test + @SuppressWarnings("unchecked") + public void testCreateIllegalReaderImp() throws IOException { + // mock data + WebSocketService wss = mock(WebSocketService.class); + PulsarClient mockedClient = mock(PulsarClient.class); + when(wss.getPulsarClient()).thenReturn(mockedClient); + ReaderBuilder mockedReaderBuilder = mock(ReaderBuilder.class); + when(mockedClient.newReader()).thenReturn(mockedReaderBuilder); + when(mockedReaderBuilder.topic(any())).thenReturn(mockedReaderBuilder); + when(mockedReaderBuilder.startMessageId(any())).thenReturn(mockedReaderBuilder); + when(mockedReaderBuilder.receiverQueueSize(anyInt())).thenReturn(mockedReaderBuilder); + IllegalReader illegalReader = new IllegalReader(); + when(mockedReaderBuilder.create()).thenReturn(illegalReader); + HttpServletRequest request = mock(HttpServletRequest.class); + when(request.getRequestURI()).thenReturn("/ws/v2/producer/persistent/my-property/my-ns/my-topic"); + // create reader handler + HttpServletResponse response = spy(HttpServletResponse.class); + ServletUpgradeResponse servletUpgradeResponse = new ServletUpgradeResponse(response); + new ReaderHandler(wss, request, servletUpgradeResponse); + // verify get error + verify(response, times(1)).sendError(anyInt(), anyString()); + } + + + static class IllegalReader implements Reader { + + 
@Override + public String getTopic() { + return null; + } + + @Override + public Message readNext() throws PulsarClientException { + return null; + } + + @Override + public Message readNext(int timeout, TimeUnit unit) throws PulsarClientException { + return null; + } + + @Override + public CompletableFuture> readNextAsync() { + return null; + } + + @Override + public CompletableFuture closeAsync() { + return null; + } + + @Override + public boolean hasReachedEndOfTopic() { + return false; + } + + @Override + public boolean hasMessageAvailable() { + return false; + } + + @Override + public CompletableFuture hasMessageAvailableAsync() { + return null; + } + + @Override + public boolean isConnected() { + return false; + } + + @Override + public void seek(MessageId messageId) throws PulsarClientException { + + } + + @Override + public void seek(long timestamp) throws PulsarClientException { + + } + + @Override + public void seek(Function function) throws PulsarClientException { + + } + + @Override + public CompletableFuture seekAsync(Function function) { + return null; + } + + @Override + public CompletableFuture seekAsync(MessageId messageId) { + return null; + } + + @Override + public CompletableFuture seekAsync(long timestamp) { + return null; + } + + @Override + public void close() throws IOException { + + } + } +} diff --git a/pulsar-zookeeper-utils/pom.xml b/pulsar-zookeeper-utils/pom.xml index f29e10df2ca4e..319d513a906c7 100644 --- a/pulsar-zookeeper-utils/pom.xml +++ b/pulsar-zookeeper-utils/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. 
diff --git a/pulsar-zookeeper-utils/src/main/java/org/apache/pulsar/zookeeper/ZkBookieRackAffinityMapping.java b/pulsar-zookeeper-utils/src/main/java/org/apache/pulsar/zookeeper/ZkBookieRackAffinityMapping.java index caeba510fd8f5..86d39884099ab 100644 --- a/pulsar-zookeeper-utils/src/main/java/org/apache/pulsar/zookeeper/ZkBookieRackAffinityMapping.java +++ b/pulsar-zookeeper-utils/src/main/java/org/apache/pulsar/zookeeper/ZkBookieRackAffinityMapping.java @@ -20,6 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.client.util.Strings; import java.net.InetAddress; import java.net.URI; import java.util.ArrayList; @@ -212,7 +213,9 @@ private String getRack(String bookieAddress) { } } - if (bi != null) { + if (bi != null + && !Strings.isNullOrEmpty(bi.getRack()) + && !bi.getRack().trim().equals("/")) { String rack = bi.getRack(); if (!rack.startsWith("/")) { rack = "/" + rack; diff --git a/pulsar-zookeeper-utils/src/main/java/org/apache/pulsar/zookeeper/ZkIsolatedBookieEnsemblePlacementPolicy.java b/pulsar-zookeeper-utils/src/main/java/org/apache/pulsar/zookeeper/ZkIsolatedBookieEnsemblePlacementPolicy.java index 93202963adbc3..4a50a6f61b380 100644 --- a/pulsar-zookeeper-utils/src/main/java/org/apache/pulsar/zookeeper/ZkIsolatedBookieEnsemblePlacementPolicy.java +++ b/pulsar-zookeeper-utils/src/main/java/org/apache/pulsar/zookeeper/ZkIsolatedBookieEnsemblePlacementPolicy.java @@ -205,9 +205,13 @@ private static Pair, Set> getIsolationGroup(EnsemblePlacemen String secondaryIsolationGroupString = castToString(properties.getOrDefault(SECONDARY_ISOLATION_BOOKIE_GROUPS, "")); if (!primaryIsolationGroupString.isEmpty()) { pair.setLeft(new HashSet(Arrays.asList(primaryIsolationGroupString.split(",")))); + } else { + pair.setLeft(Collections.emptySet()); } if (!secondaryIsolationGroupString.isEmpty()) { pair.setRight(new HashSet(Arrays.asList(secondaryIsolationGroupString.split(",")))); + } else { + pair.setRight(Collections.emptySet()); } } 
return pair; diff --git a/pulsar-zookeeper-utils/src/test/java/org/apache/pulsar/zookeeper/ZkBookieRackAffinityMappingTest.java b/pulsar-zookeeper-utils/src/test/java/org/apache/pulsar/zookeeper/ZkBookieRackAffinityMappingTest.java index 2f814062e1d87..fadffa789dc68 100644 --- a/pulsar-zookeeper-utils/src/test/java/org/apache/pulsar/zookeeper/ZkBookieRackAffinityMappingTest.java +++ b/pulsar-zookeeper-utils/src/test/java/org/apache/pulsar/zookeeper/ZkBookieRackAffinityMappingTest.java @@ -27,6 +27,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; + import org.apache.bookkeeper.conf.ClientConfiguration; import org.apache.bookkeeper.net.BookieSocketAddress; import org.apache.bookkeeper.util.ZkUtils; @@ -34,6 +36,7 @@ import org.apache.pulsar.common.policies.data.BookieInfo; import org.apache.pulsar.common.util.ObjectMapperFactory; import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.ZooKeeper; import org.awaitility.Awaitility; @@ -104,6 +107,32 @@ public void testBasic() throws Exception { assertEquals(racks2.get(2), null); localZkc.delete(ZkBookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH, -1); + assertNull(racks1.get(2)); + } + + @Test + public void testInvalidRackName() throws InterruptedException, KeeperException { + String data = "{\"group1\": {\"" + BOOKIE1 + + "\": {\"rack\": \"/\", \"hostname\": \"bookie1.example.com\"}, \"" + BOOKIE2 + + "\": {\"rack\": \"\", \"hostname\": \"bookie2.example.com\"}}}"; + + ZkUtils.createFullPathOptimistic(localZkc, ZkBookieRackAffinityMapping.BOOKIE_INFO_ROOT_PATH, data.getBytes(), + ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); + + // Case1: ZKCache is given + ZkBookieRackAffinityMapping mapping1 = new ZkBookieRackAffinityMapping(); + ClientConfiguration bkClientConf1 = new ClientConfiguration(); + bkClientConf1.setProperty(ZooKeeperCache.ZK_CACHE_INSTANCE, new 
ZooKeeperCache("test", localZkc, 30) { + }); + + mapping1.setBookieAddressResolver(BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER); + mapping1.setConf(bkClientConf1); + List racks1 = mapping1 + .resolve(Lists.newArrayList(BOOKIE1.getHostName(), BOOKIE2.getHostName(), BOOKIE3.getHostName())); + + assertNull(racks1.get(0)); + assertNull(racks1.get(1)); + assertNull(racks1.get(2)); } @Test diff --git a/pulsar-zookeeper-utils/src/test/java/org/apache/pulsar/zookeeper/ZkIsolatedBookieEnsemblePlacementPolicyTest.java b/pulsar-zookeeper-utils/src/test/java/org/apache/pulsar/zookeeper/ZkIsolatedBookieEnsemblePlacementPolicyTest.java index 02df09b64e28f..04b3b67b35b5e 100644 --- a/pulsar-zookeeper-utils/src/test/java/org/apache/pulsar/zookeeper/ZkIsolatedBookieEnsemblePlacementPolicyTest.java +++ b/pulsar-zookeeper-utils/src/test/java/org/apache/pulsar/zookeeper/ZkIsolatedBookieEnsemblePlacementPolicyTest.java @@ -306,6 +306,39 @@ public void testNoIsolationGroup() throws Exception { isolationPolicy.onClusterChanged(writableBookies, readOnlyBookies); isolationPolicy.newEnsemble(4, 4, 4, Collections.emptyMap(), new HashSet<>()); + + BookieId bookie1Id = new BookieSocketAddress(BOOKIE1).toBookieId(); + BookieId bookie2Id = new BookieSocketAddress(BOOKIE2).toBookieId(); + BookieId bookie3Id = new BookieSocketAddress(BOOKIE3).toBookieId(); + BookieId bookie4Id = new BookieSocketAddress(BOOKIE4).toBookieId(); + // when we set strictBookieAffinityEnabled=true and some namespace not set ISOLATION_BOOKIE_GROUPS there will set "" by default. 
+ Map placementPolicyProperties1 = new HashMap<>(); + placementPolicyProperties1.put( + ZkIsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS, ""); + placementPolicyProperties1.put( + ZkIsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS, ""); + EnsemblePlacementPolicyConfig policyConfig = new EnsemblePlacementPolicyConfig( + ZkIsolatedBookieEnsemblePlacementPolicy.class, + placementPolicyProperties1 + ); + Map customMetadata1 = new HashMap<>(); + customMetadata1.put(EnsemblePlacementPolicyConfig.ENSEMBLE_PLACEMENT_POLICY_CONFIG, policyConfig.encode()); + + BookieId replaceBookie1 = isolationPolicy.replaceBookie(3, 3, 3, customMetadata1, + Arrays.asList(bookie1Id,bookie2Id,bookie3Id), bookie3Id, null).getResult(); + assertEquals(replaceBookie1, bookie4Id); + + // when ISOLATION_BOOKIE_GROUPS miss. + Map placementPolicyProperties2 = new HashMap<>(); + EnsemblePlacementPolicyConfig policyConfig2 = new EnsemblePlacementPolicyConfig( + ZkIsolatedBookieEnsemblePlacementPolicy.class, + placementPolicyProperties2 + ); + Map customMetadata2 = new HashMap<>(); + customMetadata2.put(EnsemblePlacementPolicyConfig.ENSEMBLE_PLACEMENT_POLICY_CONFIG, policyConfig.encode()); + BookieId replaceBookie2 = isolationPolicy.replaceBookie(3, 3, 3, customMetadata2, + Arrays.asList(bookie1Id,bookie2Id,bookie3Id), bookie3Id, null).getResult(); + assertEquals(replaceBookie2, bookie4Id); } /** diff --git a/site2/docs/administration-proxy.md b/site2/docs/administration-proxy.md index 9d99c5ff5a87f..ed29e0ace90c2 100644 --- a/site2/docs/administration-proxy.md +++ b/site2/docs/administration-proxy.md @@ -6,21 +6,15 @@ sidebar_label: Pulsar proxy Pulsar proxy is an optional gateway. Pulsar proxy is used when direct connections between clients and Pulsar brokers are either infeasible or undesirable. For example, when you run Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, you can run Pulsar proxy. 
-## Configure the proxy - -Before using the proxy, you need to configure it with the brokers addresses in the cluster. You can configure the proxy to connect directly to service discovery, or specify a broker URL in the configuration. +The Pulsar proxy is not intended to be exposed on the public internet. The security considerations in the current design expect network perimeter security. The requirement of network perimeter security can be achieved with private networks. -### Use service discovery +If a proxy deployment cannot be protected with network perimeter security, the alternative would be to use [Pulsar's "Proxy SNI routing" feature](concepts-proxy-sni-routing.md) with a properly secured and audited solution. In that case Pulsar proxy component is not used at all. -Pulsar uses [ZooKeeper](https://zookeeper.apache.org) for service discovery. To connect the proxy to ZooKeeper, specify the following in `conf/proxy.conf`. -```properties -zookeeperServers=zk-0,zk-1,zk-2 -configurationStoreServers=zk-0:2184,zk-remote:2184 -``` +## Configure the proxy -> To use service discovery, you need to open the network ACLs, so the proxy can connects to the ZooKeeper nodes through the ZooKeeper client port (port `2181`) and the configuration store client port (port `2184`). +Before using the proxy, you need to configure it with the brokers addresses in the cluster. You can configure the broker URL in the proxy configuration, or the proxy to connect directly using service discovery. -> However, it is not secure to use service discovery. Because if the network ACL is open, when someone compromises a proxy, they have full access to ZooKeeper. +> In a production environment service discovery is not recommended. ### Use broker URLs @@ -49,13 +43,65 @@ The ports to connect to the brokers (6650 and 8080, or in the case of TLS, 6651 Note that if you do not use functions, you do not need to configure `functionWorkerWebServiceURL`. 
+### Use service discovery + +Pulsar uses [ZooKeeper](https://zookeeper.apache.org) for service discovery. To connect the proxy to ZooKeeper, specify the following in `conf/proxy.conf`. +```properties +metadataStoreUrl=my-zk-0:2181,my-zk-1:2181,my-zk-2:2181 +configurationMetadataStoreUrl=my-zk-0:2184,my-zk-remote:2184 +``` + +> To use service discovery, you need to open the network ACLs, so the proxy can connects to the ZooKeeper nodes through the ZooKeeper client port (port `2181`) and the configuration store client port (port `2184`). + +> However, it is not secure to use service discovery. Because if the network ACL is open, when someone compromises a proxy, they have full access to ZooKeeper. + +### Restricting target broker addresses to mitigate CVE-2022-24280 + +The Pulsar Proxy trusts clients to provide valid target broker addresses to connect to. +Unless the Pulsar Proxy is explicitly configured to limit access, the Pulsar Proxy is vulnerable as described in the security advisory [Apache Pulsar Proxy target broker address isn't validated (CVE-2022-24280)](https://github.com/apache/pulsar/wiki/CVE-2022-24280). + +It is necessary to limit proxied broker connections to known broker addresses by specifying `brokerProxyAllowedHostNames` and `brokerProxyAllowedIPAddresses` settings. + +When specifying `brokerProxyAllowedHostNames`, it's possible to use a wildcard. +Please notice that `*` is a wildcard that matches any character in the hostname. It also matches dot `.` characters. + +It is recommended to use a pattern that matches only the desired brokers and no other hosts in the local network. Pulsar lookups will use the default host name of the broker by default. This can be overridden with the `advertisedAddress` setting in `broker.conf`. + +To increase security, it is also possible to restrict access with the `brokerProxyAllowedIPAddresses` setting. 
It is not mandatory to configure `brokerProxyAllowedIPAddresses` when `brokerProxyAllowedHostNames` is properly configured so that the pattern matches only the target brokers. +`brokerProxyAllowedIPAddresses` setting supports a comma separate list of IP address, IP address ranges and IP address networks [(supported format reference)](https://seancfoley.github.io/IPAddress/IPAddress/apidocs/inet/ipaddr/IPAddressString.html). + +Example: limiting by host name in a Kubernetes deployment +```yaml + # example of limiting to Kubernetes statefulset hostnames that contain "broker-" + PULSAR_PREFIX_brokerProxyAllowedHostNames: '*broker-*.*.*.svc.cluster.local' +``` + +Example: limiting by both host name and ip address in a `proxy.conf` file for host deployment. +```properties +# require "broker" in host name +brokerProxyAllowedHostNames=*broker*.localdomain +# limit target ip addresses to a specific network +brokerProxyAllowedIPAddresses=10.0.0.0/8 +``` + +Example: limiting by multiple host name patterns and multiple ip address ranges in a `proxy.conf` file for host deployment. +```properties +# require "broker" in host name +brokerProxyAllowedHostNames=*broker*.localdomain,*broker*.otherdomain +# limit target ip addresses to a specific network or range demonstrating multiple supported formats +brokerProxyAllowedIPAddresses=10.10.0.0/16,192.168.1.100-120,172.16.2.*,10.1.2.3 +``` + + ## Start the proxy To start the proxy: ```bash $ cd /path/to/pulsar/directory -$ bin/pulsar proxy +$ bin/pulsar proxy \ + --metadata-store zk:my-zk-1:2181,my-zk-2:2181,my-zk-3:2181 \ + --configuration-metadata-store zk:my-zk-1:2181,my-zk-2:2181,my-zk-3:2181 ``` > You can run multiple instances of the Pulsar proxy in a cluster. 
diff --git a/site2/docs/concepts-messaging.md b/site2/docs/concepts-messaging.md index 3e488b1acfbaf..abb68624f8348 100644 --- a/site2/docs/concepts-messaging.md +++ b/site2/docs/concepts-messaging.md @@ -596,6 +596,8 @@ delayedDeliveryEnabled=true # Control the ticking time for the retry of delayed message delivery, # affecting the accuracy of the delivery time compared to the scheduled time. +# Note that this time is used to configure the HashedWheelTimer's tick time for the +# InMemoryDelayedDeliveryTrackerFactory (the default DelayedDeliverTrackerFactory). # Default is 1 second. delayedDeliveryTickTimeMillis=1000 ``` diff --git a/site2/docs/developing-binary-protocol.md b/site2/docs/developing-binary-protocol.md index 33861af0da7c9..8556243226351 100644 --- a/site2/docs/developing-binary-protocol.md +++ b/site2/docs/developing-binary-protocol.md @@ -279,6 +279,10 @@ subscription is not already there, a new one will be created. ![Consumer](assets/binary-protocol-consumer.png) +If the client does not receive a response indicating consumer creation success or failure, +the client should first send a command to close the original consumer before sending a +command to re-attempt consumer creation. + #### Flow control After the consumer is ready, the client needs to *give permission* to the @@ -388,6 +392,11 @@ Parameters: This command behaves the same as [`CloseProducer`](#command-closeproducer) +If the client does not receive a response to a `Subscribe` command within a timeout, +the client must first send a `CloseConsumer` command before sending another +`Subscribe` command. The client does not need to await a response to the `CloseConsumer` +command before sending the next `Subscribe` command. 
+ ##### Command RedeliverUnacknowledgedMessages A consumer can ask the broker to redeliver some or all of the pending messages diff --git a/site2/docs/functions-runtime.md b/site2/docs/functions-runtime.md index 0155f17bf6695..09eb1ef7c23b3 100644 --- a/site2/docs/functions-runtime.md +++ b/site2/docs/functions-runtime.md @@ -136,6 +136,10 @@ functionRuntimeFactoryConfigs: extraFunctionDependenciesDir: # Additional memory padding added on top of the memory requested by the function per on a per instance basis percentMemoryPadding: 10 + # The duration (in seconds) before the StatefulSet is deleted after a function stops or restarts. + # Value must be a non-negative integer. 0 indicates the StatefulSet is deleted immediately. + # Default is 5 seconds. + gracePeriodSeconds: 5 ``` If you run functions worker embedded in a broker on Kubernetes, you can use the default settings. diff --git a/site2/docs/functions-worker.md b/site2/docs/functions-worker.md index 05944165c4374..0a1c985a76b05 100644 --- a/site2/docs/functions-worker.md +++ b/site2/docs/functions-worker.md @@ -178,13 +178,13 @@ authenticationEnabled: true authenticationProviders: ['org.apache.pulsar.broker.authentication.AuthenticationProviderTls'] ``` -For *SASL Authentication* provider, add `saslJaasClientAllowedIds` and `saslJaasBrokerSectionName` +For *SASL Authentication* provider, add `saslJaasClientAllowedIds` and `saslJaasServerSectionName` under `properties` if needed. ``` properties: saslJaasClientAllowedIds: .*pulsar.* - saslJaasBrokerSectionName: Broker + saslJaasServerSectionName: Broker ``` For *Token Authentication* provider, add necessary settings for `properties` if needed. 
diff --git a/site2/docs/performance-pulsar-perf.md b/site2/docs/performance-pulsar-perf.md index bb831154b80de..c5aafafe4b5c4 100644 --- a/site2/docs/performance-pulsar-perf.md +++ b/site2/docs/performance-pulsar-perf.md @@ -32,7 +32,7 @@ After the command is executed, the test data is continuously output on the Conso 19:54:44.336 [Thread-1] INFO org.apache.pulsar.testclient.PerformanceProducer - Aggregated latency stats --- Latency: mean: 3.383 ms - med: 3.293 - 95pct: 4.610 - 99pct: 5.059 - 99.9pct: 5.588 - 99.99pct: 5.837 - 99.999pct: 6.609 - Max: 6.609 ``` -From the above test data, you can get the throughput statistics and the write latency statistics. The aggregated statistics is printed when the Pulsar Perf is stopped. You can press **Ctrl**+**C** to stop the Pulsar Perf. After the Pulsar Perf is stopped, the [HdrHistogram](http://hdrhistogram.github.io/HdrHistogram/) formatted test result appears under your directory. The document looks like `perf-producer-1589370810837.hgrm`. You can also check the test result through [HdrHistogram Plotter](https://hdrhistogram.github.io/HdrHistogram/plotFiles.html). For details about how to check the test result through [HdrHistogram Plotter](https://hdrhistogram.github.io/HdrHistogram/plotFiles.html), see [HdrHistogram Plotter](#hdrhistogram-plotter). +From the above test data, you can get the throughput statistics and the write latency statistics. The aggregated statistics is printed when the Pulsar Perf is stopped. You can press **Ctrl**+**C** to stop the Pulsar Perf. If you specify a filename with the `--histogram-file` parameter, a file with the [HdrHistogram](http://hdrhistogram.github.io/HdrHistogram/) formatted test result appears under your directory after Pulsar Perf is stopped. You can also check the test result through [HdrHistogram Plotter](https://hdrhistogram.github.io/HdrHistogram/plotFiles.html). 
For details about how to check the test result through [HdrHistogram Plotter](https://hdrhistogram.github.io/HdrHistogram/plotFiles.html), see [HdrHistogram Plotter](#hdrhistogram-plotter). ### Configuration options for `pulsar-perf produce` @@ -61,6 +61,7 @@ The following table lists configuration options available for the `pulsar-perf p | format-class | Set the custom formatter class name. | org.apache.pulsar.testclient.DefaultMessageFormatter | | format-payload | Configure whether to format %i as a message index in the stream from producer and/or %t as the timestamp nanoseconds. | false | | help | Configure the help message. | false | +| histogram-file | HdrHistogram output file | N/A | | max-connections | Set the maximum number of TCP connections to a single broker. | 100 | | max-outstanding | Set the maximum number of outstanding messages. | 1000 | | max-outstanding-across-partitions | Set the maximum number of outstanding messages across partitions. | 50000 | @@ -131,6 +132,7 @@ The following table lists configuration options available for the `pulsar-perf c | encryption-key-name | Set the name of the public key used to encrypt the payload. | N/A | | encryption-key-value-file | Set the file which contains the public key used to encrypt the payload. | N/A | | help | Configure the help message. | false | +| histogram-file | HdrHistogram output file | N/A | | expire_time_incomplete_chunked_messages | Set the expiration time for incomplete chunk messages (in milliseconds). | 0 | | max-connections | Set the maximum number of TCP connections to a single broker. | 100 | | max_chunked_msg | Set the max pending chunk messages. 
| 0 | diff --git a/site2/docs/reference-cli-tools.md b/site2/docs/reference-cli-tools.md index 6a128186dc284..f4ae1365d8723 100644 --- a/site2/docs/reference-cli-tools.md +++ b/site2/docs/reference-cli-tools.md @@ -312,6 +312,7 @@ Options |`-m`, `--messages`|Comma-separated string of messages to send; either -m or -f must be specified|[]| |`-n`, `--num-produce`|The number of times to send the message(s); the count of messages/files * num-produce should be below 1000|1| |`-r`, `--rate`|Rate (in messages per second) at which to produce; a value 0 means to produce messages as fast as possible|0.0| +|`-db`, `--disable-batching`|Disable batch sending of messages|false| |`-c`, `--chunking`|Split the message and publish in chunks if the message size is larger than the allowed max size|false| |`-s`, `--separator`|Character to split messages string with.|","| |`-k`, `--key`|Message key to add|key=value string, like k1=v1,k2=v2.| @@ -446,6 +447,7 @@ Options |`-mc`, `--max_chunked_msg`|Max pending chunk messages|0| |`-n`, `--num-consumers`|Number of consumers (per topic)|1| |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1| +|`-lt`, `--num-listener-threads`|Set the number of threads to be used for message listeners|1| |`-ns`, `--num-subscriptions`|Number of subscriptions (per topic)|1| |`-t`, `--num-topics`|The number of topics|1| |`-pm`, `--pool-messages`|Use the pooled message|true| @@ -540,6 +542,7 @@ Options |`-n`, `--num-messages`|Number of messages to consume in total. 
If the value is equal to or smaller than 0, it keeps consuming messages.|0| |`-c`, `--max-connections`|Max number of TCP connections to a single broker|100| |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1| +|`-lt`, `--num-listener-threads`|Set the number of threads to be used for message listeners|1| |`-t`, `--num-topics`|The number of topics|1| |`-r`, `--rate`|Simulate a slow message reader (rate in msg/s)|0| |`-q`, `--receiver-queue-size`|Size of the receiver queue|1000| diff --git a/site2/docs/reference-configuration.md b/site2/docs/reference-configuration.md index 72f134ae7286f..3226007209fd6 100644 --- a/site2/docs/reference-configuration.md +++ b/site2/docs/reference-configuration.md @@ -352,6 +352,15 @@ brokerServiceCompactionThresholdInBytes|If the estimated backlog size is greater | managedLedgerInfoCompressionType | Compression type of managed ledger information.

Available options are `NONE`, `LZ4`, `ZLIB`, `ZSTD`, and `SNAPPY`).

If this value is `NONE` or invalid, the `managedLedgerInfo` is not compressed.

**Note** that after enabling this configuration, if you want to degrade a broker, you need to change the value to `NONE` and make sure all ledger metadata is saved without compression. | None | | additionalServlets | Additional servlet name.

If you have multiple additional servlets, separate them by commas.

For example, additionalServlet_1, additionalServlet_2 | N/A | | additionalServletDirectory | Location of broker additional servlet NAR directory | ./brokerAdditionalServlet | +#### Configuration Override For Clients Internal to Broker + +It's possible to configure some clients by using the appropriate prefix. + +|Prefix|Description| +|brokerClient_| Configure **all** the broker's Pulsar Clients and Pulsar Admin Clients. These configurations are applied after hard coded configuration and before the above brokerClient configurations named above.| +|bookkeeper_| Configure the broker's bookkeeper clients used by managed ledgers and the BookkeeperPackagesStorage bookkeeper client. Takes precedence over most other configuration values.| + +Note: when running the function worker within the broker, these prefixed configurations do not apply to any of those clients. You must instead configure those clients using the `functions_worker.yml`. ## Client @@ -545,7 +554,7 @@ You can set the log level and configuration in the [log4j2.yaml](https://github |tokenAudienceClaim| The token audience "claim" name, e.g. "aud". It is used to get the audience from token. If it is not set, the audience is not verified. || | tokenAudience | The token audience stands for this broker. The field `tokenAudienceClaim` of a valid token need contains this parameter.| | |saslJaasClientAllowedIds|This is a regexp, which limits the range of possible ids which can connect to the Broker using SASL. By default, it is set to `SaslConstants.JAAS_CLIENT_ALLOWED_IDS_DEFAULT`, which is ".*pulsar.*", so only clients whose id contains 'pulsar' are allowed to connect.|N/A| -|saslJaasBrokerSectionName|Service Principal, for login context name. By default, it is set to `SaslConstants.JAAS_DEFAULT_BROKER_SECTION_NAME`, which is "Broker".|N/A| +|saslJaasServerSectionName|Service Principal, for login context name. 
By default, it is set to `SaslConstants.JAAS_DEFAULT_BROKER_SECTION_NAME`, which is "Broker".|N/A| |httpMaxRequestSize|If the value is larger than 0, it rejects all HTTP requests with bodies larged than the configured limit.|-1| |exposePreciseBacklogInPrometheus| Enable expose the precise backlog stats, set false to use published counter and consumed counter to calculate, this would be more efficient but may be inaccurate. |false| |bookkeeperMetadataServiceUri|Metadata service uri is what BookKeeper used for loading corresponding metadata driver and resolving its metadata service location. This value can be fetched using `bookkeeper shell whatisinstanceid` command in BookKeeper cluster. For example: `zk+hierarchical://localhost:2181/ledgers`. The metadata service uri list can also be semicolon separated values like: `zk+hierarchical://zk1:2181;zk2:2181;zk3:2181/ledgers`.|N/A| @@ -677,6 +686,12 @@ You can set the log level and configuration in the [log4j2.yaml](https://github |tlsCertificateFilePath||| |tlsKeyFilePath ||| |tlsTrustCertsFilePath||| +#### Configuration Override For Clients Internal to WebSocket + +It's possible to configure some clients by using the appropriate prefix. + +|Prefix|Description| +|brokerClient_| Configure **all** the broker's Pulsar Clients. These configurations are applied after hard coded configuration and before the above brokerClient configurations named above.| ## Pulsar proxy @@ -732,6 +747,14 @@ The [Pulsar proxy](concepts-architecture-overview.md#pulsar-proxy) can be config |tokenAudienceClaim| The token audience "claim" name, e.g. "aud". It is used to get the audience from token. If it is not set, the audience is not verified. || | tokenAudience | The token audience stands for this broker. The field `tokenAudienceClaim` of a valid token need contains this parameter.| | |haProxyProtocolEnabled | Enable or disable the [HAProxy](http://www.haproxy.org/) protocol. |false| +| numIOThreads | Number of threads used for Netty IO. 
| 2 * Runtime.getRuntime().availableProcessors() | +| numAcceptorThreads | Number of threads used for Netty Acceptor. | 1 | +#### Configuration Override For Clients Internal to Proxy + +It's possible to configure some clients by using the appropriate prefix. + +|Prefix|Description| +|brokerClient_| Configure **all** the proxy's Pulsar Clients. These configurations are applied after hard coded configuration and before the above brokerClient configurations named above.| ## ZooKeeper diff --git a/site2/docs/reference-metrics.md b/site2/docs/reference-metrics.md index 3467931e86b98..720d077d1e97e 100644 --- a/site2/docs/reference-metrics.md +++ b/site2/docs/reference-metrics.md @@ -328,6 +328,10 @@ All the bundleUnloading metrics are labelled with the following labels: - cluster: cluster=${pulsar_cluster}. ${pulsar_cluster} is the cluster name that you have configured in the `broker.conf` file. - metric: metric="bundlesSplit". +| Name | Type | Description | +|-------------------------------|---------|------------------------------------------------------------| +| pulsar_lb_bundles_split_total | Counter | The total count of bundle split in this leader broker | + | Name | Type | Description | | --- | --- | --- | | pulsar_lb_bundles_split_count | Counter | bundle split count in this bundle splitting check interval | diff --git a/site2/docs/security-kerberos.md b/site2/docs/security-kerberos.md index f7fe4c5f650e5..897bf3bb0dd9f 100644 --- a/site2/docs/security-kerberos.md +++ b/site2/docs/security-kerberos.md @@ -113,7 +113,7 @@ You can have 2 separate JAAS configuration files: - Set `authenticationEnabled` to `true`; - Set `authenticationProviders` to choose `AuthenticationProviderSasl`; - Set `saslJaasClientAllowedIds` regex for principal that is allowed to connect to broker; - - Set `saslJaasBrokerSectionName` that corresponds to the section in JAAS configuration file for broker; + - Set `saslJaasServerSectionName` that corresponds to the section in JAAS configuration 
file for broker; To make Pulsar internal admin client work properly, you need to set the configuration in the `broker.conf` file as below: - Set `brokerClientAuthenticationPlugin` to client plugin `AuthenticationSasl`; @@ -125,7 +125,7 @@ You can have 2 separate JAAS configuration files: authenticationEnabled=true authenticationProviders=org.apache.pulsar.broker.authentication.AuthenticationProviderSasl saslJaasClientAllowedIds=.*client.* -saslJaasBrokerSectionName=PulsarBroker +saslJaasServerSectionName=PulsarBroker ## Authentication settings of the broker itself. Used when the broker connects to other brokers brokerClientAuthenticationPlugin=org.apache.pulsar.client.impl.auth.AuthenticationSasl @@ -306,7 +306,7 @@ In the `proxy.conf` file, set Kerberos related configuration. Here is an example authenticationEnabled=true authenticationProviders=org.apache.pulsar.broker.authentication.AuthenticationProviderSasl saslJaasClientAllowedIds=.*client.* -saslJaasBrokerSectionName=PulsarProxy +saslJaasServerSectionName=PulsarProxy ## related to be authenticated by broker brokerClientAuthenticationPlugin=org.apache.pulsar.client.impl.auth.AuthenticationSasl @@ -326,7 +326,7 @@ The broker side configuration file is the same with the above `broker.conf`, you authenticationEnabled=true authenticationProviders=org.apache.pulsar.broker.authentication.AuthenticationProviderSasl saslJaasClientAllowedIds=.*client.* -saslJaasBrokerSectionName=PulsarBroker +saslJaasServerSectionName=PulsarBroker ``` ## Regarding authorization and role token diff --git a/site2/docs/security-oauth2.md b/site2/docs/security-oauth2.md index 7ea9f35969397..35d0b5f4ccc40 100644 --- a/site2/docs/security-oauth2.md +++ b/site2/docs/security-oauth2.md @@ -28,7 +28,7 @@ The following table lists parameters supported for the `client credentials` auth | `type` | Oauth 2.0 authentication type. 
| `client_credentials` (default) | Optional | | `issuerUrl` | URL of the authentication provider which allows the Pulsar client to obtain an access token | `https://accounts.google.com` | Required | | `privateKey` | URL to a JSON credentials file | Support the following pattern formats:
  • `file:///path/to/file`
  • `file:/path/to/file`
  • `data:application/json;base64,` | Required | -| `audience` | An OAuth 2.0 "resource server" identifier for the Pulsar cluster | `https://broker.example.com` | Required | +| `audience` | An OAuth 2.0 "resource server" identifier for the Pulsar cluster | `https://broker.example.com` | Optional | The credentials file contains service account credentials used with the client authentication type. The following shows an example of a credentials file `credentials_file.json`. @@ -63,7 +63,7 @@ In the above example, the mapping relationship is shown as below. - The `issuerUrl` parameter in this plugin is mapped to `--url https://dev-kt-aa9ne.us.auth0.com`. - The `privateKey` file parameter in this plugin should at least contains the `client_id` and `client_secret` fields. -- The `audience` parameter in this plugin is mapped to `"audience":"https://dev-kt-aa9ne.us.auth0.com/api/v2/"`. +- The `audience` parameter in this plugin is mapped to `"audience":"https://dev-kt-aa9ne.us.auth0.com/api/v2/"`. This field is only used by some identity providers. ## Client Configuration diff --git a/site2/docs/sql-deployment-configurations.md b/site2/docs/sql-deployment-configurations.md index 1fe0353f07531..e5c402ebdc14d 100644 --- a/site2/docs/sql-deployment-configurations.md +++ b/site2/docs/sql-deployment-configurations.md @@ -24,6 +24,9 @@ pulsar.entry-read-batch-size=100 # default number of splits to use per query pulsar.target-num-splits=4 + +# max size of one batch message (default value is 5MB) +pulsar.max-message-size=5242880 ``` You can connect Presto to a Pulsar cluster with multiple hosts. To configure multiple hosts for brokers, add multiple URLs to `pulsar.web-service-url`. To configure multiple hosts for ZooKeeper, add multiple URIs to `pulsar.zookeeper-uri`. The following is an example. 
diff --git a/site2/website-next/docs/administration-proxy.md b/site2/website-next/docs/administration-proxy.md index f1c38d71c2981..1371ee2d6059f 100644 --- a/site2/website-next/docs/administration-proxy.md +++ b/site2/website-next/docs/administration-proxy.md @@ -10,9 +10,13 @@ import TabItem from '@theme/TabItem'; Pulsar proxy is an optional gateway. Pulsar proxy is used when direct connections between clients and Pulsar brokers are either infeasible or undesirable. For example, when you run Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, you can run Pulsar proxy. +The Pulsar proxy is not intended to be exposed on the public internet. The security considerations in the current design expect network perimeter security. The requirement of network perimeter security can be achieved with private networks. + +If a proxy deployment cannot be protected with network perimeter security, the alternative would be to use [Pulsar's "Proxy SNI routing" feature](concepts-proxy-sni-routing.md) with a properly secured and audited solution. In that case Pulsar proxy component is not used at all. + ## Configure the proxy -Before using the proxy, you need to configure it with the brokers addresses in the cluster. You can configure the proxy to connect directly to service discovery, or specify a broker URL in the configuration. +Before using a proxy, you need to configure it with a broker's address in the cluster. You can configure the broker URL in the proxy configuration, or the proxy to connect directly using service discovery. 
### Use service discovery @@ -47,12 +51,51 @@ brokerWebServiceURLTLS=https://brokers.example.com:8443 functionWorkerWebServiceURL=https://function-workers.example.com:8443 ``` -The hostname in the URLs provided should be a DNS entry which points to multiple brokers or a virtual IP address, which is backed by multiple broker IP addresses, so that the proxy does not lose connectivity to Pulsar cluster if a single broker becomes unavailable. +The hostname in the URLs provided should be a DNS entry that points to multiple brokers or a virtual IP address, which is backed by multiple broker IP addresses, so that the proxy does not lose connectivity to Pulsar cluster if a single broker becomes unavailable. The ports to connect to the brokers (6650 and 8080, or in the case of TLS, 6651 and 8443) should be open in the network ACLs. Note that if you do not use functions, you do not need to configure `functionWorkerWebServiceURL`. +> However, it is not secure to use service discovery. Because if the network ACL is open, when someone compromises a proxy, they have full access to ZooKeeper. + +### Restricting target broker addresses to mitigate CVE-2022-24280 + +The Pulsar Proxy trusts clients to provide valid target broker addresses to connect to. +Unless the Pulsar Proxy is explicitly configured to limit access, the Pulsar Proxy is vulnerable as described in the security advisory [Apache Pulsar Proxy target broker address isn't validated (CVE-2022-24280)](https://github.com/apache/pulsar/wiki/CVE-2022-24280). + +It is necessary to limit proxied broker connections to known broker addresses by specifying `brokerProxyAllowedHostNames` and `brokerProxyAllowedIPAddresses` settings. + +When specifying `brokerProxyAllowedHostNames`, it's possible to use a wildcard. +Please notice that `*` is a wildcard that matches any character in the hostname. It also matches dot `.` characters. 
+ +It is recommended to use a pattern that matches only the desired brokers and no other hosts in the local network. Pulsar lookups will use the default host name of the broker by default. This can be overridden with the `advertisedAddress` setting in `broker.conf`. + +To increase security, it is also possible to restrict access with the `brokerProxyAllowedIPAddresses` setting. It is not mandatory to configure `brokerProxyAllowedIPAddresses` when `brokerProxyAllowedHostNames` is properly configured so that the pattern matches only the target brokers. +`brokerProxyAllowedIPAddresses` setting supports a comma separate list of IP address, IP address ranges and IP address networks [(supported format reference)](https://seancfoley.github.io/IPAddress/IPAddress/apidocs/inet/ipaddr/IPAddressString.html). + +Example: limiting by host name in a Kubernetes deployment +```yaml + # example of limiting to Kubernetes statefulset hostnames that contain "broker-" + PULSAR_PREFIX_brokerProxyAllowedHostNames: '*broker-*.*.*.svc.cluster.local' +``` + +Example: limiting by both host name and ip address in a `proxy.conf` file for host deployment. +```properties +# require "broker" in host name +brokerProxyAllowedHostNames=*broker*.localdomain +# limit target ip addresses to a specific network +brokerProxyAllowedIPAddresses=10.0.0.0/8 +``` + +Example: limiting by multiple host name patterns and multiple ip address ranges in a `proxy.conf` file for host deployment. 
+```properties +# require "broker" in host name +brokerProxyAllowedHostNames=*broker*.localdomain,*broker*.otherdomain +# limit target ip addresses to a specific network or range demonstrating multiple supported formats +brokerProxyAllowedIPAddresses=10.10.0.0/16,192.168.1.100-120,172.16.2.*,10.1.2.3 +``` + ## Start the proxy To start the proxy: diff --git a/site2/website-next/docs/reference-cli-tools.md b/site2/website-next/docs/reference-cli-tools.md index cbdf7173ba927..a2eda14a32c62 100644 --- a/site2/website-next/docs/reference-cli-tools.md +++ b/site2/website-next/docs/reference-cli-tools.md @@ -533,6 +533,7 @@ Options |`-mc`, `--max_chunked_msg`|Max pending chunk messages|0| |`-n`, `--num-consumers`|Number of consumers (per topic)|1| |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1| +|`-lt`, `--num-listener-threads`|Set the number of threads to be used for message listeners|1| |`-ns`, `--num-subscriptions`|Number of subscriptions (per topic)|1| |`-t`, `--num-topics`|The number of topics|1| |`-pm`, `--pool-messages`|Use the pooled message|true| @@ -633,6 +634,7 @@ Options |`-n`, `--num-messages`|Number of messages to consume in total. 
If the value is equal to or smaller than 0, it keeps consuming messages.|0| |`-c`, `--max-connections`|Max number of TCP connections to a single broker|100| |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1| +|`-lt`, `--num-listener-threads`|Set the number of threads to be used for message listeners|1| |`-t`, `--num-topics`|The number of topics|1| |`-r`, `--rate`|Simulate a slow message reader (rate in msg/s)|0| |`-q`, `--receiver-queue-size`|Size of the receiver queue|1000| diff --git a/site2/website-next/docs/security-oauth2.md b/site2/website-next/docs/security-oauth2.md index 19c37893f9972..820a6968604eb 100644 --- a/site2/website-next/docs/security-oauth2.md +++ b/site2/website-next/docs/security-oauth2.md @@ -32,7 +32,7 @@ The following table lists parameters supported for the `client credentials` auth | `type` | Oauth 2.0 authentication type. | `client_credentials` (default) | Optional | | `issuerUrl` | URL of the authentication provider which allows the Pulsar client to obtain an access token | `https://accounts.google.com` | Required | | `privateKey` | URL to a JSON credentials file | Support the following pattern formats:
  • `file:///path/to/file`
  • `file:/path/to/file`
  • `data:application/json;base64,`
  • | Required | -| `audience` | An OAuth 2.0 "resource server" identifier for the Pulsar cluster | `https://broker.example.com` | Required | +| `audience` | An OAuth 2.0 "resource server" identifier for the Pulsar cluster | `https://broker.example.com` | Optional | The credentials file contains service account credentials used with the client authentication type. The following shows an example of a credentials file `credentials_file.json`. @@ -71,7 +71,7 @@ In the above example, the mapping relationship is shown as below. - The `issuerUrl` parameter in this plugin is mapped to `--url https://dev-kt-aa9ne.us.auth0.com`. - The `privateKey` file parameter in this plugin should at least contains the `client_id` and `client_secret` fields. -- The `audience` parameter in this plugin is mapped to `"audience":"https://dev-kt-aa9ne.us.auth0.com/api/v2/"`. +- The `audience` parameter in this plugin is mapped to `"audience":"https://dev-kt-aa9ne.us.auth0.com/api/v2/"`. This field is only used by some identity providers. 
## Client Configuration diff --git a/site2/website/versioned_docs/version-2.6.0/reference-metrics.md b/site2/website/versioned_docs/version-2.6.0/reference-metrics.md index d3f60bd415f89..71a26f5fba988 100644 --- a/site2/website/versioned_docs/version-2.6.0/reference-metrics.md +++ b/site2/website/versioned_docs/version-2.6.0/reference-metrics.md @@ -280,7 +280,7 @@ All the bundleUnloading metrics are labelled with the following labels: | Name | Type | Description | | --- | --- | --- | -| pulsar_lb_bundles_split_count | Counter | bundle split count in this bundle splitting check interval | +| pulsar_lb_bundles_split_count | Counter | The total count of bundle split in this leader broker | ### Subscription metrics diff --git a/site2/website/versioned_docs/version-2.6.1/reference-metrics.md b/site2/website/versioned_docs/version-2.6.1/reference-metrics.md index ebd6e4616b55f..d0ba807652c2e 100644 --- a/site2/website/versioned_docs/version-2.6.1/reference-metrics.md +++ b/site2/website/versioned_docs/version-2.6.1/reference-metrics.md @@ -280,7 +280,7 @@ All the bundleUnloading metrics are labelled with the following labels: | Name | Type | Description | | --- | --- | --- | -| pulsar_lb_bundles_split_count | Counter | bundle split count in this bundle splitting check interval | +| pulsar_lb_bundles_split_count | Counter | The total count of bundle split in this leader broker | ### Subscription metrics diff --git a/site2/website/versioned_docs/version-2.6.2/reference-metrics.md b/site2/website/versioned_docs/version-2.6.2/reference-metrics.md index e1f58788b35ed..8bb6d1fdb3039 100644 --- a/site2/website/versioned_docs/version-2.6.2/reference-metrics.md +++ b/site2/website/versioned_docs/version-2.6.2/reference-metrics.md @@ -280,7 +280,7 @@ All the bundleUnloading metrics are labelled with the following labels: | Name | Type | Description | | --- | --- | --- | -| pulsar_lb_bundles_split_count | Counter | bundle split count in this bundle splitting check interval | +| 
pulsar_lb_bundles_split_count | Counter | The total count of bundle split in this leader broker | ### Subscription metrics diff --git a/site2/website/versioned_docs/version-2.6.3/reference-metrics.md b/site2/website/versioned_docs/version-2.6.3/reference-metrics.md index f0feb88e39041..076c08ef24a72 100644 --- a/site2/website/versioned_docs/version-2.6.3/reference-metrics.md +++ b/site2/website/versioned_docs/version-2.6.3/reference-metrics.md @@ -280,7 +280,7 @@ All the bundleUnloading metrics are labelled with the following labels: | Name | Type | Description | | --- | --- | --- | -| pulsar_lb_bundles_split_count | Counter | bundle split count in this bundle splitting check interval | +| pulsar_lb_bundles_split_count | Counter | The total count of bundle split in this leader broker | ### Subscription metrics diff --git a/site2/website/versioned_docs/version-2.6.4/reference-metrics.md b/site2/website/versioned_docs/version-2.6.4/reference-metrics.md index 9804a138ccf47..24ffea239d360 100644 --- a/site2/website/versioned_docs/version-2.6.4/reference-metrics.md +++ b/site2/website/versioned_docs/version-2.6.4/reference-metrics.md @@ -280,7 +280,7 @@ All the bundleUnloading metrics are labelled with the following labels: | Name | Type | Description | | --- | --- | --- | -| pulsar_lb_bundles_split_count | Counter | bundle split count in this bundle splitting check interval | +| pulsar_lb_bundles_split_count | Counter | The total count of bundle split in this leader broker | ### Subscription metrics diff --git a/site2/website/versioned_docs/version-2.7.0/reference-metrics.md b/site2/website/versioned_docs/version-2.7.0/reference-metrics.md index cd9731e7804d8..74684fd73d59a 100644 --- a/site2/website/versioned_docs/version-2.7.0/reference-metrics.md +++ b/site2/website/versioned_docs/version-2.7.0/reference-metrics.md @@ -276,7 +276,7 @@ All the bundleUnloading metrics are labelled with the following labels: | Name | Type | Description | | --- | --- | --- | -| 
pulsar_lb_bundles_split_count | Counter | bundle split count in this bundle splitting check interval | +| pulsar_lb_bundles_split_count | Counter | The total count of bundle split in this leader broker | ### Subscription metrics diff --git a/site2/website/versioned_docs/version-2.7.1/reference-metrics.md b/site2/website/versioned_docs/version-2.7.1/reference-metrics.md index 0aa3ca92e08fe..fee9402726479 100644 --- a/site2/website/versioned_docs/version-2.7.1/reference-metrics.md +++ b/site2/website/versioned_docs/version-2.7.1/reference-metrics.md @@ -278,7 +278,7 @@ All the bundleUnloading metrics are labelled with the following labels: | Name | Type | Description | | --- | --- | --- | -| pulsar_lb_bundles_split_count | Counter | bundle split count in this bundle splitting check interval | +| pulsar_lb_bundles_split_count | Counter | The total count of bundle split in this leader broker | ### Subscription metrics diff --git a/site2/website/versioned_docs/version-2.7.2/reference-metrics.md b/site2/website/versioned_docs/version-2.7.2/reference-metrics.md index 0b2dada20c2eb..d2c90a841a39e 100644 --- a/site2/website/versioned_docs/version-2.7.2/reference-metrics.md +++ b/site2/website/versioned_docs/version-2.7.2/reference-metrics.md @@ -278,7 +278,7 @@ All the bundleUnloading metrics are labelled with the following labels: | Name | Type | Description | | --- | --- | --- | -| pulsar_lb_bundles_split_count | Counter | bundle split count in this bundle splitting check interval | +| pulsar_lb_bundles_split_count | Counter | The total count of bundle split in this leader broker | ### Subscription metrics diff --git a/site2/website/versioned_docs/version-2.7.3/reference-metrics.md b/site2/website/versioned_docs/version-2.7.3/reference-metrics.md index 60c26903f9847..408acb161ed7f 100644 --- a/site2/website/versioned_docs/version-2.7.3/reference-metrics.md +++ b/site2/website/versioned_docs/version-2.7.3/reference-metrics.md @@ -298,7 +298,7 @@ All the bundleUnloading 
metrics are labelled with the following labels: | Name | Type | Description | | --- | --- | --- | -| pulsar_lb_bundles_split_count | Counter | bundle split count in this bundle splitting check interval | +| pulsar_lb_bundles_split_count | Counter | The total count of bundle split in this leader broker | ### Subscription metrics diff --git a/src/assembly-source-package.xml b/src/assembly-source-package.xml index 2677299357f3f..f31aadc146972 100644 --- a/src/assembly-source-package.xml +++ b/src/assembly-source-package.xml @@ -106,6 +106,7 @@ /docker/pulsar/scripts *.sh + *.py 0755
    diff --git a/src/check-binary-license b/src/check-binary-license index 5e8f365178acf..afe380d5c8ffb 100755 --- a/src/check-binary-license +++ b/src/check-binary-license @@ -96,7 +96,7 @@ done if [ "$NO_PRESTO" -ne 1 ]; then # check pulsar sql jars - JARS=$(tar -tf $TARBALL | grep '\.jar' | grep 'lib/presto/' | grep -v pulsar-client | grep -v bouncy-castle-bc | grep -v pulsar-metadata | grep -v 'managed-ledger' | grep -v 'pulsar-client-admin' | grep -v 'pulsar-client-api' | grep -v 'pulsar-functions-api' | grep -v 'pulsar-presto-connector-original' | grep -v 'pulsar-presto-distribution' | grep -v 'pulsar-common' | grep -v 'pulsar-functions-proto' | grep -v 'pulsar-functions-utils' | grep -v 'pulsar-io-core' | grep -v 'pulsar-transaction-common' | grep -v 'pulsar-package-core' | sed 's!.*/!!' | sort) + JARS=$(tar -tf $TARBALL | grep '\.jar' | grep 'lib/presto/' | grep -v pulsar-client | grep -v bouncy-castle-bc | grep -v pulsar-metadata | grep -v 'managed-ledger' | grep -v 'pulsar-client-admin' | grep -v 'pulsar-client-api' | grep -v 'pulsar-functions-api' | grep -v 'pulsar-presto-connector-original' | grep -v 'pulsar-presto-distribution' | grep -v 'pulsar-common' | grep -v 'pulsar-functions-proto' | grep -v 'pulsar-functions-utils' | grep -v 'pulsar-io-core' | grep -v 'pulsar-transaction-common' | grep -v 'pulsar-package-core' | grep -v 'java-version-trim-agent' | sed 's!.*/!!' | sort) LICENSEPATH=$(tar -tf $TARBALL | awk '/^[^\/]*\/lib\/presto\/LICENSE/') LICENSE=$(tar -O -xf $TARBALL "$LICENSEPATH") LICENSEJARS=$(echo "$LICENSE" | sed -nE 's!.* (.*\.jar).*!\1!gp') diff --git a/src/gen-pulsar-version-macro.py b/src/gen-pulsar-version-macro.py new file mode 100755 index 0000000000000..f32df91772f74 --- /dev/null +++ b/src/gen-pulsar-version-macro.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import xml.etree.ElementTree as ET +import re +from os.path import dirname, realpath, join + +# Derive the POM path from the current script location +TOP_LEVEL_PATH = dirname(dirname(realpath(__file__))) +POM_PATH = join(TOP_LEVEL_PATH, 'pom.xml') + +root = ET.XML(open(POM_PATH).read()) +m = re.search(r'^(\d+)\.(\d+)\.(\d+)', root.find('{http://maven.apache.org/POM/4.0.0}version').text) + +version_macro = 0 +for i in range(3): + version_macro += int(m.group(3 - i)) * (1000 ** i) +print(version_macro) diff --git a/src/owasp-dependency-check-false-positives.xml b/src/owasp-dependency-check-false-positives.xml index 7336feedec244..4984db5762bfd 100644 --- a/src/owasp-dependency-check-false-positives.xml +++ b/src/owasp-dependency-check-false-positives.xml @@ -32,6 +32,11 @@ org\.apache\.pulsar:.* cpe:/a:apache:zookeeper + + pulsar-package-bookkeeper-storage gets mixed with bookkeeper. 
+ org\.apache\.pulsar:.* + cpe:/a:apache:bookkeeper + kubernetes client doesn't contain CVE-2020-8554 io\.kubernetes:.* @@ -42,4 +47,25 @@ org\.apache\.avro:.* CVE-2019-17195 + + CVE-2021-43045 affects only .NET distro, see https://github.com/apache/avro/pull/1357 + org\.apache\.avro:.* + CVE-2021-43045 + + + + ^pkg:maven/io\.netty/netty\-tcnative\-classes@.*$ + cpe:/a:netty:netty + + + + + + 1a754a5dd672218a2ac667d7ff2b28df7a5a240e + CVE-2022-25647 + \ No newline at end of file diff --git a/src/owasp-dependency-check-suppressions.xml b/src/owasp-dependency-check-suppressions.xml index 139365d98985f..90698c0843529 100644 --- a/src/owasp-dependency-check-suppressions.xml +++ b/src/owasp-dependency-check-suppressions.xml @@ -41,4 +41,106 @@ org\.apache\.zookeeper:.*:3\.6\.2 .* - \ No newline at end of file + + + + + ef50bfa2c0491a11dcc35d9822edbfd6170e1ea2 + cpe:/a:jetbrains:kotlin + + + + 3546900a3ebff0c43f31190baf87a9220e37b7ea + CVE-2022-24329 + + + + 3302f9ec8a5c1ed220781dbd37770072549bd333 + CVE-2022-24329 + + + + 461367948840adbb0839c51d91ed74ef4a9ccb52 + CVE-2022-24329 + + + + + + ^pkg:maven/org\.apache\.avro/avro@.*$ + CVE-2021-43045 + + + + ^pkg:maven/ru\.yandex\.clickhouse/clickhouse\-jdbc@.*$ + CVE-2018-14668 + + + + ^pkg:maven/ru\.yandex\.clickhouse/clickhouse\-jdbc@.*$ + CVE-2018-14669 + + + + ^pkg:maven/ru\.yandex\.clickhouse/clickhouse\-jdbc@.*$ + CVE-2018-14670 + + + + ^pkg:maven/ru\.yandex\.clickhouse/clickhouse\-jdbc@.*$ + CVE-2018-14671 + + + + ^pkg:maven/ru\.yandex\.clickhouse/clickhouse\-jdbc@.*$ + CVE-2018-14672 + + + + ^pkg:maven/ru\.yandex\.clickhouse/clickhouse\-jdbc@.*$ + CVE-2019-15024 + + + + ^pkg:maven/ru\.yandex\.clickhouse/clickhouse\-jdbc@.*$ + CVE-2019-16535 + + + + ^pkg:maven/ru\.yandex\.clickhouse/clickhouse\-jdbc@.*$ + CVE-2019-18657 + + + + ^pkg:maven/ru\.yandex\.clickhouse/clickhouse\-jdbc@.*$ + CVE-2021-25263 + + diff --git a/structured-event-log/pom.xml b/structured-event-log/pom.xml index cb882523fbafe..1d84189f513f3 100644 
--- a/structured-event-log/pom.xml +++ b/structured-event-log/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/testmocks/pom.xml b/testmocks/pom.xml index 0d199f1ccb273..7bc28b2bd7127 100644 --- a/testmocks/pom.xml +++ b/testmocks/pom.xml @@ -25,7 +25,7 @@ pulsar org.apache.pulsar - 2.9.0-SNAPSHOT + 2.9.3 testmocks diff --git a/testmocks/src/main/java/org/apache/bookkeeper/client/BookKeeperTestClient.java b/testmocks/src/main/java/org/apache/bookkeeper/client/BookKeeperTestClient.java index 184fc7f4c025d..910c829761be4 100644 --- a/testmocks/src/main/java/org/apache/bookkeeper/client/BookKeeperTestClient.java +++ b/testmocks/src/main/java/org/apache/bookkeeper/client/BookKeeperTestClient.java @@ -20,6 +20,7 @@ import java.io.IOException; import org.apache.bookkeeper.conf.ClientConfiguration; +import org.apache.bookkeeper.meta.zk.ZKMetadataClientDriver; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooKeeper; @@ -32,7 +33,7 @@ public BookKeeperTestClient(ClientConfiguration conf) throws IOException, Interr } public ZooKeeper getZkHandle() { - return super.getZkHandle(); + return ((ZKMetadataClientDriver) metadataDriver).getZk(); } public ClientConfiguration getConf() { diff --git a/testmocks/src/main/java/org/apache/bookkeeper/client/PulsarMockLedgerHandle.java b/testmocks/src/main/java/org/apache/bookkeeper/client/PulsarMockLedgerHandle.java index 8a62e42e05a6f..3f9b17b312f69 100644 --- a/testmocks/src/main/java/org/apache/bookkeeper/client/PulsarMockLedgerHandle.java +++ b/testmocks/src/main/java/org/apache/bookkeeper/client/PulsarMockLedgerHandle.java @@ -18,6 +18,7 @@ */ package org.apache.bookkeeper.client; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import io.netty.buffer.ByteBuf; @@ -31,7 +32,7 @@ import java.util.List; import java.util.Queue; import java.util.concurrent.CompletableFuture; - +import lombok.Getter; import 
org.apache.bookkeeper.client.AsyncCallback.AddCallback; import org.apache.bookkeeper.client.AsyncCallback.CloseCallback; import org.apache.bookkeeper.client.AsyncCallback.ReadCallback; @@ -44,7 +45,6 @@ import org.apache.bookkeeper.client.impl.LedgerEntryImpl; import org.apache.bookkeeper.common.concurrent.FutureUtils; import org.apache.bookkeeper.net.BookieId; -import org.apache.bookkeeper.net.BookieSocketAddress; import org.apache.bookkeeper.versioning.LongVersion; import org.apache.bookkeeper.versioning.Versioned; import org.slf4j.Logger; @@ -62,6 +62,8 @@ public class PulsarMockLedgerHandle extends LedgerHandle { final byte[] passwd; final ReadHandle readHandle; long lastEntry = -1; + @VisibleForTesting + @Getter boolean fenced = false; public PulsarMockLedgerHandle(PulsarMockBookKeeper bk, long id, diff --git a/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeper.java b/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeper.java index c7f677ddec699..cd0c60c0087df 100644 --- a/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeper.java +++ b/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeper.java @@ -19,32 +19,33 @@ package org.apache.zookeeper; import com.google.common.collect.HashMultimap; +import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Multimaps; import com.google.common.collect.SetMultimap; import com.google.common.collect.Sets; import io.netty.util.concurrent.DefaultThreadFactory; - -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Optional; import java.util.Set; import java.util.TreeMap; +import java.util.TreeSet; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; +import 
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiPredicate; - import lombok.AllArgsConstructor; import lombok.Data; -import org.apache.commons.lang3.tuple.Pair; +import lombok.extern.slf4j.Slf4j; import org.apache.zookeeper.AsyncCallback.Children2Callback; import org.apache.zookeeper.AsyncCallback.ChildrenCallback; import org.apache.zookeeper.AsyncCallback.DataCallback; @@ -53,7 +54,6 @@ import org.apache.zookeeper.AsyncCallback.VoidCallback; import org.apache.zookeeper.Watcher.Event.EventType; import org.apache.zookeeper.Watcher.Event.KeeperState; -import org.apache.zookeeper.client.HostProvider; import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Stat; import org.objenesis.Objenesis; @@ -64,7 +64,19 @@ import org.slf4j.LoggerFactory; public class MockZooKeeper extends ZooKeeper { - private TreeMap> tree; + @Data + @AllArgsConstructor + private static class MockZNode { + byte[] content; + int version; + long ephemeralOwner; + + static MockZNode of(byte[] content, int version, long ephemeralOwner) { + return new MockZNode(content, version, ephemeralOwner); + } + } + + private TreeMap tree; private SetMultimap watchers; private volatile boolean stopped; private AtomicReference alwaysFail; @@ -78,6 +90,7 @@ public class MockZooKeeper extends ZooKeeper { private ReentrantLock mutex; private AtomicLong sequentialIdGenerator; + private ThreadLocal epheralOwnerThreadLocal; //see details of Objenesis caching - http://objenesis.org/details.html //see supported jvms - https://github.com/easymock/objenesis/blob/master/SupportedJVMs.md @@ -85,9 +98,9 @@ public class MockZooKeeper extends ZooKeeper { public enum Op { CREATE, GET, SET, GET_CHILDREN, DELETE, EXISTS, SYNC, - }; + } - private class Failure { + private static class Failure { final 
KeeperException.Code failReturnCode; final BiPredicate predicate; @@ -121,14 +134,7 @@ public static MockZooKeeper newInstanceForGlobalZK(ExecutorService executor) { public static MockZooKeeper newInstanceForGlobalZK(ExecutorService executor, int readOpDelayMs) { try { - ObjectInstantiator mockZooKeeperInstantiator = - new ObjenesisStd().getInstantiatorOf(MockZooKeeper.class); - MockZooKeeper zk = (MockZooKeeper) mockZooKeeperInstantiator.newInstance(); - zk.init(executor); - zk.readOpDelayMs = readOpDelayMs; - zk.mutex = new ReentrantLock(); - zk.sequentialIdGenerator = new AtomicLong(); - return zk; + return createMockZooKeeperInstance(executor, readOpDelayMs); } catch (RuntimeException e) { throw e; } catch (Exception e) { @@ -138,14 +144,9 @@ public static MockZooKeeper newInstanceForGlobalZK(ExecutorService executor, int public static MockZooKeeper newInstance(ExecutorService executor, int readOpDelayMs) { try { - ObjectInstantiator mockZooKeeperInstantiator = objenesis.getInstantiatorOf(MockZooKeeper.class); - MockZooKeeper zk = (MockZooKeeper) mockZooKeeperInstantiator.newInstance(); - zk.init(executor); - zk.readOpDelayMs = readOpDelayMs; - zk.mutex = new ReentrantLock(); + MockZooKeeper zk = createMockZooKeeperInstance(executor, readOpDelayMs); ObjectInstantiator clientCnxnObjectInstantiator = objenesis.getInstantiatorOf(ClientCnxn.class); Whitebox.setInternalState(zk, "cnxn", clientCnxnObjectInstantiator.newInstance()); - zk.sequentialIdGenerator = new AtomicLong(); return zk; } catch (RuntimeException e) { throw e; @@ -154,6 +155,19 @@ public static MockZooKeeper newInstance(ExecutorService executor, int readOpDela } } + private static MockZooKeeper createMockZooKeeperInstance(ExecutorService executor, int readOpDelayMs) { + ObjectInstantiator mockZooKeeperInstantiator = + objenesis.getInstantiatorOf(MockZooKeeper.class); + MockZooKeeper zk = mockZooKeeperInstantiator.newInstance(); + zk.epheralOwnerThreadLocal = new ThreadLocal<>(); + zk.init(executor); 
+ zk.readOpDelayMs = readOpDelayMs; + zk.mutex = new ReentrantLock(); + zk.lockInstance = ThreadLocal.withInitial(zk::createLock); + zk.sequentialIdGenerator = new AtomicLong(); + return zk; + } + private void init(ExecutorService executor) { tree = Maps.newTreeMap(); if (executor != null) { @@ -176,7 +190,8 @@ public int getSessionTimeout() { private MockZooKeeper(String quorum) throws Exception { // This constructor is never called - super(quorum, 1, event -> {}); + super(quorum, 1, event -> { + }); assert false; } @@ -185,27 +200,68 @@ public States getState() { return States.CONNECTED; } + + @Slf4j + private static class SingleAcquireAndReleaseLock { + private final AtomicBoolean acquired = new AtomicBoolean(false); + private final Lock lock; + + SingleAcquireAndReleaseLock(Lock lock) { + this.lock = lock; + } + + public void lock() { + if (acquired.compareAndSet(false, true)) { + lock.lock(); + } else { + throw new IllegalStateException("Lock was already acquired!"); + } + } + + public void unlockIfNeeded() { + if (acquired.compareAndSet(true, false)) { + lock.unlock(); + } + } + } + + private ThreadLocal lockInstance; + + private SingleAcquireAndReleaseLock createLock() { + return new SingleAcquireAndReleaseLock(mutex); + } + + private void lock() { + lockInstance.get().lock(); + } + + private void unlockIfLocked() { + lockInstance.get().unlockIfNeeded(); + } + @Override public void register(Watcher watcher) { - mutex.lock(); + lock(); sessionWatcher = watcher; - mutex.unlock(); + unlockIfLocked(); } @Override public String create(String path, byte[] data, List acl, CreateMode createMode) throws KeeperException, InterruptedException { - mutex.lock(); - final Set toNotifyCreate = Sets.newHashSet(); final Set toNotifyParent = Sets.newHashSet(); final String parent = path.substring(0, path.lastIndexOf("/")); + lock(); try { + + maybeThrowProgrammedFailure(Op.CREATE, path); - if (stopped) + if (stopped) { throw new KeeperException.ConnectionLossException(); + } 
if (tree.containsKey(path)) { throw new KeeperException.NodeExistsException(path); @@ -215,16 +271,17 @@ public String create(String path, byte[] data, List acl, CreateMode createM throw new KeeperException.NoNodeException(); } - if (createMode == CreateMode.EPHEMERAL_SEQUENTIAL || createMode == CreateMode.PERSISTENT_SEQUENTIAL) { - byte[] parentData = tree.get(parent).getLeft(); - int parentVersion = tree.get(parent).getRight(); + if (createMode.isSequential()) { + MockZNode parentNode = tree.get(parent); + int parentVersion = tree.get(parent).getVersion(); path = path + parentVersion; // Update parent version - tree.put(parent, Pair.of(parentData, parentVersion + 1)); + tree.put(parent, + MockZNode.of(parentNode.getContent(), parentVersion + 1, parentNode.getEphemeralOwner())); } - tree.put(path, Pair.of(data, 0)); + tree.put(path, MockZNode.of(data, 0, createMode.isEphemeral() ? getEphemeralOwner() : -1L)); toNotifyCreate.addAll(watchers.get(path)); @@ -233,8 +290,7 @@ public String create(String path, byte[] data, List acl, CreateMode createM } watchers.removeAll(path); } finally { - - mutex.unlock(); + unlockIfLocked(); } final String finalPath = path; @@ -256,68 +312,90 @@ public String create(String path, byte[] data, List acl, CreateMode createM return path; } + protected long getEphemeralOwner() { + Long epheralOwner = epheralOwnerThreadLocal.get(); + if (epheralOwner != null) { + return epheralOwner; + } + return getSessionId(); + } + + public void overrideEpheralOwner(long epheralOwner) { + epheralOwnerThreadLocal.set(epheralOwner); + } + + public void removeEpheralOwnerOverride() { + epheralOwnerThreadLocal.remove(); + } + @Override public void create(final String path, final byte[] data, final List acl, CreateMode createMode, - final StringCallback cb, final Object ctx) { + final StringCallback cb, final Object ctx) { executor.execute(() -> { - mutex.lock(); + lock(); + try { - if (stopped) { - 
cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null); - return; - } + if (stopped) { + cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null); + return; + } - final Set toNotifyCreate = Sets.newHashSet(); - toNotifyCreate.addAll(watchers.get(path)); + final Set toNotifyCreate = Sets.newHashSet(); + toNotifyCreate.addAll(watchers.get(path)); - final Set toNotifyParent = Sets.newHashSet(); - final String parent = path.substring(0, path.lastIndexOf("/")); - if (!parent.isEmpty()) { - toNotifyParent.addAll(watchers.get(parent)); - } + final Set toNotifyParent = Sets.newHashSet(); + final String parent = path.substring(0, path.lastIndexOf("/")); + if (!parent.isEmpty()) { + toNotifyParent.addAll(watchers.get(parent)); + } - final String name; - if (createMode != null && createMode.isSequential()) { - name = path + Long.toString(sequentialIdGenerator.getAndIncrement()); - } else { - name = path; - } + final String name; + if (createMode != null && createMode.isSequential()) { + name = path + sequentialIdGenerator.getAndIncrement(); + } else { + name = path; + } - Optional failure = programmedFailure(Op.CREATE, path); - if (failure.isPresent()) { - mutex.unlock(); - cb.processResult(failure.get().intValue(), path, ctx, null); - } else if (stopped) { - mutex.unlock(); - cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null); - } else if (tree.containsKey(path)) { - mutex.unlock(); - cb.processResult(KeeperException.Code.NODEEXISTS.intValue(), path, ctx, null); - } else if (!parent.isEmpty() && !tree.containsKey(parent)) { - mutex.unlock(); - toNotifyParent.forEach(watcher -> watcher - .process(new WatchedEvent(EventType.NodeChildrenChanged, KeeperState.SyncConnected, parent))); - cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null); - } else { - tree.put(name, Pair.of(data, 0)); - watchers.removeAll(name); - mutex.unlock(); - cb.processResult(0, path, ctx, name); - - 
triggerPersistentWatches(path, parent, EventType.NodeCreated); - - toNotifyCreate.forEach( - watcher -> watcher.process( - new WatchedEvent(EventType.NodeCreated, - KeeperState.SyncConnected, - name))); - toNotifyParent.forEach( - watcher -> watcher.process( - new WatchedEvent(EventType.NodeChildrenChanged, - KeeperState.SyncConnected, - parent))); + Optional failure = programmedFailure(Op.CREATE, path); + if (failure.isPresent()) { + unlockIfLocked(); + cb.processResult(failure.get().intValue(), path, ctx, null); + } else if (stopped) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null); + } else if (tree.containsKey(path)) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.NODEEXISTS.intValue(), path, ctx, null); + } else if (!parent.isEmpty() && !tree.containsKey(parent)) { + unlockIfLocked(); + toNotifyParent.forEach(watcher -> watcher + .process(new WatchedEvent(EventType.NodeChildrenChanged, KeeperState.SyncConnected, + parent))); + cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null); + } else { + tree.put(name, MockZNode.of(data, 0, + createMode != null && createMode.isEphemeral() ? 
getEphemeralOwner() : -1L)); + watchers.removeAll(name); + unlockIfLocked(); + cb.processResult(0, path, ctx, name); + + triggerPersistentWatches(path, parent, EventType.NodeCreated); + + toNotifyCreate.forEach( + watcher -> watcher.process( + new WatchedEvent(EventType.NodeCreated, + KeeperState.SyncConnected, + name))); + toNotifyParent.forEach( + watcher -> watcher.process( + new WatchedEvent(EventType.NodeChildrenChanged, + KeeperState.SyncConnected, + parent))); + } + } finally { + unlockIfLocked(); } }); @@ -325,10 +403,10 @@ public void create(final String path, final byte[] data, final List acl, Cr @Override public byte[] getData(String path, Watcher watcher, Stat stat) throws KeeperException { - mutex.lock(); + lock(); try { maybeThrowProgrammedFailure(Op.GET, path); - Pair value = tree.get(path); + MockZNode value = tree.get(path); if (value == null) { throw new KeeperException.NoNodeException(path); } else { @@ -336,12 +414,12 @@ public byte[] getData(String path, Watcher watcher, Stat stat) throws KeeperExce watchers.put(path, watcher); } if (stat != null) { - stat.setVersion(value.getRight()); + applyToStat(value, stat); } - return value.getLeft(); + return value.getContent(); } } finally { - mutex.unlock(); + unlockIfLocked(); } } @@ -358,20 +436,18 @@ public void getData(final String path, boolean watch, final DataCallback cb, fin return; } - Pair value; - mutex.lock(); + MockZNode value; + lock(); try { value = tree.get(path); } finally { - mutex.unlock(); + unlockIfLocked(); } if (value == null) { cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null, null); } else { - Stat stat = new Stat(); - stat.setVersion(value.getRight()); - cb.processResult(0, path, ctx, value.getLeft(), stat); + cb.processResult(0, path, ctx, value.getContent(), createStatForZNode(value)); } }); } @@ -380,31 +456,34 @@ public void getData(final String path, boolean watch, final DataCallback cb, fin public void getData(final String path, final Watcher 
watcher, final DataCallback cb, final Object ctx) { executor.execute(() -> { checkReadOpDelay(); - mutex.lock(); - Optional failure = programmedFailure(Op.GET, path); - if (failure.isPresent()) { - mutex.unlock(); - cb.processResult(failure.get().intValue(), path, ctx, null, null); - return; - } else if (stopped) { - mutex.unlock(); - cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null, null); - return; - } - - Pair value = tree.get(path); - if (value == null) { - mutex.unlock(); - cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null, null); - } else { - if (watcher != null) { - watchers.put(path, watcher); + lock(); + try { + Optional failure = programmedFailure(Op.GET, path); + if (failure.isPresent()) { + unlockIfLocked(); + cb.processResult(failure.get().intValue(), path, ctx, null, null); + return; + } else if (stopped) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null, null); + return; } - Stat stat = new Stat(); - stat.setVersion(value.getRight()); - mutex.unlock(); - cb.processResult(0, path, ctx, value.getLeft(), stat); + MockZNode value = tree.get(path); + if (value == null) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null, null); + } else { + if (watcher != null) { + watchers.put(path, watcher); + } + + Stat stat = createStatForZNode(value); + unlockIfLocked(); + cb.processResult(0, path, ctx, value.getContent(), stat); + } + } finally { + unlockIfLocked(); } }); } @@ -412,44 +491,47 @@ public void getData(final String path, final Watcher watcher, final DataCallback @Override public void getChildren(final String path, final Watcher watcher, final ChildrenCallback cb, final Object ctx) { executor.execute(() -> { - mutex.lock(); - Optional failure = programmedFailure(Op.GET_CHILDREN, path); - if (failure.isPresent()) { - mutex.unlock(); - cb.processResult(failure.get().intValue(), path, ctx, null); - 
return; - } else if (stopped) { - mutex.unlock(); - cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null); - return; - } + lock(); + List children = Lists.newArrayList(); + try { + Optional failure = programmedFailure(Op.GET_CHILDREN, path); + if (failure.isPresent()) { + unlockIfLocked(); + cb.processResult(failure.get().intValue(), path, ctx, null); + return; + } else if (stopped) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null); + return; + } - if (!tree.containsKey(path)) { - mutex.unlock(); - cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null); - return; - } + if (!tree.containsKey(path)) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null); + return; + } - List children = Lists.newArrayList(); - for (String item : tree.tailMap(path).keySet()) { - if (!item.startsWith(path)) { - break; - } else { - if (path.length() >= item.length()) { - continue; - } + for (String item : tree.tailMap(path).keySet()) { + if (!item.startsWith(path)) { + break; + } else { + if (path.length() >= item.length()) { + continue; + } - String child = item.substring(path.length() + 1); - if (item.charAt(path.length()) == '/' && !child.contains("/")) { - children.add(child); + String child = item.substring(path.length() + 1); + if (item.charAt(path.length()) == '/' && !child.contains("/")) { + children.add(child); + } } } - } - if (watcher != null) { - watchers.put(path, watcher); + if (watcher != null) { + watchers.put(path, watcher); + } + } finally { + unlockIfLocked(); } - mutex.unlock(); cb.processResult(0, path, ctx, children); }); @@ -457,7 +539,7 @@ public void getChildren(final String path, final Watcher watcher, final Children @Override public List getChildren(String path, Watcher watcher) throws KeeperException { - mutex.lock(); + lock(); try { maybeThrowProgrammedFailure(Op.GET_CHILDREN, path); @@ -465,35 +547,31 @@ public 
List getChildren(String path, Watcher watcher) throws KeeperExcep throw new KeeperException.NoNodeException(); } - List children = Lists.newArrayList(); - for (String item : tree.tailMap(path).keySet()) { - if (!item.startsWith(path)) { - break; - } else { - if (path.length() >= item.length()) { - continue; - } + String firstKey = path.equals("/") ? path : path + "/"; + String lastKey = path.equals("/") ? "0" : path + "0"; // '0' is lexicographically just after '/' - String child = item.substring(path.length() + 1); - if (!child.contains("/")) { - children.add(child); - } - } - } + Set children = new TreeSet<>(); + tree.subMap(firstKey, false, lastKey, false).forEach((key, value) -> { + String relativePath = key.replace(firstKey, ""); + + // Only return first-level children + String child = relativePath.split("/", 2)[0]; + children.add(child); + }); if (watcher != null) { watchers.put(path, watcher); } - return children; + return new ArrayList<>(children); } finally { - mutex.unlock(); + unlockIfLocked(); } } @Override public List getChildren(String path, boolean watch) throws KeeperException, InterruptedException { - mutex.lock(); + lock(); try { maybeThrowProgrammedFailure(Op.GET_CHILDREN, path); @@ -503,173 +581,153 @@ public List getChildren(String path, boolean watch) throws KeeperExcepti throw new KeeperException.NoNodeException(); } - List children = Lists.newArrayList(); - for (String item : tree.tailMap(path).keySet()) { - if (!item.startsWith(path)) { - break; - } else { - if (path.length() >= item.length()) { - continue; - } - String child = item.substring(path.length()); - if (child.indexOf("/") == 0) { - child = child.substring(1); - log.debug("child: '{}'", child); - if (!child.contains("/")) { - children.add(child); - } - } - } - } - return children; + String firstKey = path.equals("/") ? path : path + "/"; + String lastKey = path.equals("/") ? 
"0" : path + "0"; // '0' is lexicographically just after '/' + + Set children = new TreeSet<>(); + tree.subMap(firstKey, false, lastKey, false).forEach((key, value) -> { + String relativePath = key.replace(firstKey, ""); + + // Only return first-level children + String child = relativePath.split("/", 2)[0]; + children.add(child); + }); + + return new ArrayList<>(children); } finally { - mutex.unlock(); + unlockIfLocked(); } } @Override public void getChildren(final String path, boolean watcher, final Children2Callback cb, final Object ctx) { executor.execute(() -> { - mutex.lock(); + Set children = new TreeSet<>(); + lock(); + try { + Optional failure = programmedFailure(Op.GET_CHILDREN, path); + if (failure.isPresent()) { + unlockIfLocked(); + cb.processResult(failure.get().intValue(), path, ctx, null, null); + return; + } else if (stopped) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null, null); + return; + } else if (!tree.containsKey(path)) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null, null); + return; + } - Optional failure = programmedFailure(Op.GET_CHILDREN, path); - if (failure.isPresent()) { - mutex.unlock(); - cb.processResult(failure.get().intValue(), path, ctx, null, null); - return; - } else if (stopped) { - mutex.unlock(); - cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null, null); - return; - } else if (!tree.containsKey(path)) { - mutex.unlock(); - cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null, null); - return; - } + String firstKey = path.equals("/") ? path : path + "/"; + String lastKey = path.equals("/") ? 
"0" : path + "0"; // '0' is lexicographically just after '/' - log.debug("getChildren path={}", path); - List children = Lists.newArrayList(); - for (String item : tree.tailMap(path).keySet()) { - log.debug("Checking path {}", item); - if (!item.startsWith(path)) { - break; - } else if (item.equals(path)) { - continue; - } else { - String child = item.substring(path.length()); - if (child.indexOf("/") == 0) { - child = child.substring(1); - log.debug("child: '{}'", child); - if (!child.contains("/")) { - children.add(child); - } - } - } - } + tree.subMap(firstKey, false, lastKey, false).forEach((key, value) -> { + String relativePath = key.replace(firstKey, ""); - log.debug("getChildren done path={} result={}", path, children); - mutex.unlock(); - cb.processResult(0, path, ctx, children, new Stat()); + // Only return first-level children + String child = relativePath.split("/", 2)[0]; + children.add(child); + }); + } finally { + unlockIfLocked(); + } + cb.processResult(0, path, ctx, new ArrayList<>(children), new Stat()); }); } @Override public Stat exists(String path, boolean watch) throws KeeperException, InterruptedException { - mutex.lock(); + lock(); try { maybeThrowProgrammedFailure(Op.EXISTS, path); - if (stopped) + if (stopped) { throw new KeeperException.ConnectionLossException(); + } if (tree.containsKey(path)) { - Stat stat = new Stat(); - stat.setVersion(tree.get(path).getRight()); - return stat; + return createStatForZNode(tree.get(path)); } else { return null; } } finally { - mutex.unlock(); + unlockIfLocked(); + } + } + + private static Stat createStatForZNode(MockZNode zNode) { + return applyToStat(zNode, new Stat()); + } + + private static Stat applyToStat(MockZNode zNode, Stat stat) { + stat.setVersion(zNode.getVersion()); + if (zNode.getEphemeralOwner() != -1L) { + stat.setEphemeralOwner(zNode.getEphemeralOwner()); } + return stat; } @Override public Stat exists(String path, Watcher watcher) throws KeeperException, InterruptedException { - 
mutex.lock(); + lock(); try { maybeThrowProgrammedFailure(Op.EXISTS, path); - if (stopped) + if (stopped) { throw new KeeperException.ConnectionLossException(); + } if (watcher != null) { watchers.put(path, watcher); } if (tree.containsKey(path)) { - Stat stat = new Stat(); - stat.setVersion(tree.get(path).getRight()); - return stat; + return createStatForZNode(tree.get(path)); } else { return null; } } finally { - mutex.unlock(); + unlockIfLocked(); } } @Override public void exists(String path, boolean watch, StatCallback cb, Object ctx) { - executor.execute(() -> { - mutex.lock(); - Optional failure = programmedFailure(Op.EXISTS, path); - if (failure.isPresent()) { - mutex.unlock(); - cb.processResult(failure.get().intValue(), path, ctx, null); - return; - } else if (stopped) { - mutex.unlock(); - cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null); - return; - } - - if (tree.containsKey(path)) { - mutex.unlock(); - cb.processResult(0, path, ctx, new Stat()); - } else { - mutex.unlock(); - cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null); - } - }); + exists(path, null, cb, ctx); } @Override public void exists(String path, Watcher watcher, StatCallback cb, Object ctx) { executor.execute(() -> { - mutex.lock(); - Optional failure = programmedFailure(Op.EXISTS, path); - if (failure.isPresent()) { - mutex.unlock(); - cb.processResult(failure.get().intValue(), path, ctx, null); - return; - } else if (stopped) { - mutex.unlock(); - cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null); - return; - } + lock(); + try { + Optional failure = programmedFailure(Op.EXISTS, path); + if (failure.isPresent()) { + unlockIfLocked(); + cb.processResult(failure.get().intValue(), path, ctx, null); + return; + } else if (stopped) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null); + return; + } - if (watcher != null) { - watchers.put(path, watcher); 
- } + if (watcher != null) { + watchers.put(path, watcher); + } - if (tree.containsKey(path)) { - mutex.unlock(); - cb.processResult(0, path, ctx, new Stat()); - } else { - mutex.unlock(); - cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null); + if (tree.containsKey(path)) { + unlockIfLocked(); + cb.processResult(0, path, ctx, new Stat()); + } else { + unlockIfLocked(); + cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null); + } + } finally { + unlockIfLocked(); } }); } @@ -693,11 +751,10 @@ public void sync(String path, VoidCallback cb, Object ctx) { @Override public Stat setData(final String path, byte[] data, int version) throws KeeperException, InterruptedException { - mutex.lock(); - final Set toNotify = Sets.newHashSet(); - int newVersion; + MockZNode newZNode; + lock(); try { maybeThrowProgrammedFailure(Op.SET, path); @@ -709,21 +766,22 @@ public Stat setData(final String path, byte[] data, int version) throws KeeperEx throw new KeeperException.NoNodeException(); } - int currentVersion = tree.get(path).getRight(); + MockZNode mockZNode = tree.get(path); + int currentVersion = mockZNode.getVersion(); // Check version if (version != -1 && version != currentVersion) { throw new KeeperException.BadVersionException(path); } - newVersion = currentVersion + 1; log.debug("[{}] Updating -- current version: {}", path, currentVersion); - tree.put(path, Pair.of(data, newVersion)); + newZNode = MockZNode.of(data, currentVersion + 1, mockZNode.getEphemeralOwner()); + tree.put(path, newZNode); toNotify.addAll(watchers.get(path)); watchers.removeAll(path); } finally { - mutex.unlock(); + unlockIfLocked(); } executor.execute(() -> { @@ -733,9 +791,7 @@ public Stat setData(final String path, byte[] data, int version) throws KeeperEx .process(new WatchedEvent(EventType.NodeDataChanged, KeeperState.SyncConnected, path))); }); - Stat stat = new Stat(); - stat.setVersion(newVersion); - return stat; + return createStatForZNode(newZNode); 
} @Override @@ -747,43 +803,45 @@ public void setData(final String path, final byte[] data, int version, final Sta executor.execute(() -> { final Set toNotify = Sets.newHashSet(); + Stat stat; + lock(); + try { - mutex.lock(); + Optional failure = programmedFailure(Op.SET, path); + if (failure.isPresent()) { + unlockIfLocked(); + cb.processResult(failure.get().intValue(), path, ctx, null); + return; + } else if (stopped) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null); + return; + } - Optional failure = programmedFailure(Op.SET, path); - if (failure.isPresent()) { - mutex.unlock(); - cb.processResult(failure.get().intValue(), path, ctx, null); - return; - } else if (stopped) { - mutex.unlock(); - cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, null); - return; - } + if (!tree.containsKey(path)) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null); + return; + } - if (!tree.containsKey(path)) { - mutex.unlock(); - cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx, null); - return; - } + MockZNode mockZNode = tree.get(path); + int currentVersion = mockZNode.getVersion(); - int currentVersion = tree.get(path).getRight(); + // Check version + if (version != -1 && version != currentVersion) { + log.debug("[{}] Current version: {} -- Expected: {}", path, currentVersion, version); + unlockIfLocked(); + cb.processResult(KeeperException.Code.BADVERSION.intValue(), path, ctx, null); + return; + } - // Check version - if (version != -1 && version != currentVersion) { - log.debug("[{}] Current version: {} -- Expected: {}", path, currentVersion, version); - mutex.unlock(); - cb.processResult(KeeperException.Code.BADVERSION.intValue(), path, ctx, null); - return; + log.debug("[{}] Updating -- current version: {}", path, currentVersion); + MockZNode newZNode = MockZNode.of(data, currentVersion + 1, mockZNode.getEphemeralOwner()); + 
tree.put(path, newZNode); + stat = createStatForZNode(newZNode); + } finally { + unlockIfLocked(); } - - int newVersion = currentVersion + 1; - log.debug("[{}] Updating -- current version: {}", path, currentVersion); - tree.put(path, Pair.of(data, newVersion)); - Stat stat = new Stat(); - stat.setVersion(newVersion); - - mutex.unlock(); cb.processResult(0, path, ctx, stat); toNotify.addAll(watchers.get(path)); @@ -805,7 +863,7 @@ public void delete(final String path, int version) throws InterruptedException, final Set toNotifyParent; final String parent; - mutex.lock(); + lock(); try { if (stopped) { throw new KeeperException.ConnectionLossException(); @@ -816,7 +874,7 @@ public void delete(final String path, int version) throws InterruptedException, } if (version != -1) { - int currentVersion = tree.get(path).getRight(); + int currentVersion = tree.get(path).getVersion(); if (version != currentVersion) { throw new KeeperException.BadVersionException(path); } @@ -835,7 +893,7 @@ public void delete(final String path, int version) throws InterruptedException, watchers.removeAll(path); } finally { - mutex.unlock(); + unlockIfLocked(); } executor.execute(() -> { @@ -857,50 +915,55 @@ public void delete(final String path, int version) throws InterruptedException, @Override public void delete(final String path, int version, final VoidCallback cb, final Object ctx) { Runnable r = () -> { - mutex.lock(); - final Set toNotifyDelete = Sets.newHashSet(); - toNotifyDelete.addAll(watchers.get(path)); - - final Set toNotifyParent = Sets.newHashSet(); - final String parent = path.substring(0, path.lastIndexOf("/")); - if (!parent.isEmpty()) { - toNotifyParent.addAll(watchers.get(parent)); - } - watchers.removeAll(path); + lock(); + try { + final Set toNotifyDelete = Sets.newHashSet(); + toNotifyDelete.addAll(watchers.get(path)); - Optional failure = programmedFailure(Op.DELETE, path); - if (failure.isPresent()) { - mutex.unlock(); - cb.processResult(failure.get().intValue(), 
path, ctx); - } else if (stopped) { - mutex.unlock(); - cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx); - } else if (!tree.containsKey(path)) { - mutex.unlock(); - cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx); - } else if (hasChildren(path)) { - mutex.unlock(); - cb.processResult(KeeperException.Code.NOTEMPTY.intValue(), path, ctx); - } else { - if (version != -1) { - int currentVersion = tree.get(path).getRight(); - if (version != currentVersion) { - mutex.unlock(); - cb.processResult(KeeperException.Code.BADVERSION.intValue(), path, ctx); - return; - } + final Set toNotifyParent = Sets.newHashSet(); + final String parent = path.substring(0, path.lastIndexOf("/")); + if (!parent.isEmpty()) { + toNotifyParent.addAll(watchers.get(parent)); } + watchers.removeAll(path); + + Optional failure = programmedFailure(Op.DELETE, path); + if (failure.isPresent()) { + unlockIfLocked(); + cb.processResult(failure.get().intValue(), path, ctx); + } else if (stopped) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx); + } else if (!tree.containsKey(path)) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx); + } else if (hasChildren(path)) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.NOTEMPTY.intValue(), path, ctx); + } else { + if (version != -1) { + int currentVersion = tree.get(path).getVersion(); + if (version != currentVersion) { + unlockIfLocked(); + cb.processResult(KeeperException.Code.BADVERSION.intValue(), path, ctx); + return; + } + } - tree.remove(path); + tree.remove(path); - mutex.unlock(); - cb.processResult(0, path, ctx); + unlockIfLocked(); + cb.processResult(0, path, ctx); - toNotifyDelete.forEach(watcher -> watcher - .process(new WatchedEvent(EventType.NodeDeleted, KeeperState.SyncConnected, path))); - toNotifyParent.forEach(watcher -> watcher - .process(new WatchedEvent(EventType.NodeChildrenChanged, 
KeeperState.SyncConnected, parent))); - triggerPersistentWatches(path, parent, EventType.NodeDeleted); + toNotifyDelete.forEach(watcher -> watcher + .process(new WatchedEvent(EventType.NodeDeleted, KeeperState.SyncConnected, path))); + toNotifyParent.forEach(watcher -> watcher + .process(new WatchedEvent(EventType.NodeChildrenChanged, KeeperState.SyncConnected, + parent))); + triggerPersistentWatches(path, parent, EventType.NodeDeleted); + } + } finally { + unlockIfLocked(); } }; @@ -908,7 +971,6 @@ public void delete(final String path, int version, final VoidCallback cb, final executor.execute(r); } catch (RejectedExecutionException ree) { cb.processResult(KeeperException.Code.SESSIONEXPIRED.intValue(), path, ctx); - return; } } @@ -917,30 +979,62 @@ public void delete(final String path, int version, final VoidCallback cb, final public void multi(Iterable ops, AsyncCallback.MultiCallback cb, Object ctx) { try { List res = multi(ops); - cb.processResult(KeeperException.Code.OK.intValue(), (String)null, ctx, res); + cb.processResult(KeeperException.Code.OK.intValue(), null, ctx, res); } catch (Exception e) { - cb.processResult(KeeperException.Code.APIERROR.intValue(), (String)null, ctx, null); + cb.processResult(KeeperException.Code.APIERROR.intValue(), null, ctx, null); } } @Override public List multi(Iterable ops) throws InterruptedException, KeeperException { List res = new ArrayList<>(); - for (org.apache.zookeeper.Op op : ops) { - switch (op.getType()) { - case ZooDefs.OpCode.create: - this.create(op.getPath(), ((org.apache.zookeeper.Op.Create)op).data, null, null); - res.add(new OpResult.CreateResult(op.getPath())); - break; - case ZooDefs.OpCode.delete: - this.delete(op.getPath(), -1); - res.add(new OpResult.DeleteResult()); - break; - case ZooDefs.OpCode.setData: - this.create(op.getPath(), ((org.apache.zookeeper.Op.Create)op).data, null, null); - res.add(new OpResult.SetDataResult(null)); - break; - default: + try { + for (org.apache.zookeeper.Op op : ops) 
{ + switch (op.getType()) { + case ZooDefs.OpCode.create: { + org.apache.zookeeper.Op.Create opc = ((org.apache.zookeeper.Op.Create) op); + CreateMode cm = CreateMode.fromFlag(opc.flags); + String path = this.create(op.getPath(), opc.data, null, cm); + res.add(new OpResult.CreateResult(path)); + break; + } + case ZooDefs.OpCode.delete: + this.delete(op.getPath(), Whitebox.getInternalState(op, "version")); + res.add(new OpResult.DeleteResult()); + break; + case ZooDefs.OpCode.setData: { + Stat stat = this.setData(op.getPath(), Whitebox.getInternalState(op, "data"), + Whitebox.getInternalState(op, "version")); + res.add(new OpResult.SetDataResult(stat)); + break; + } + case ZooDefs.OpCode.getChildren: { + try { + List children = this.getChildren(op.getPath(), null); + res.add(new OpResult.GetChildrenResult(children)); + } catch (KeeperException e) { + res.add(new OpResult.ErrorResult(e.code().intValue())); + } + break; + } + case ZooDefs.OpCode.getData: { + Stat stat = new Stat(); + try { + byte[] payload = this.getData(op.getPath(), null, stat); + res.add(new OpResult.GetDataResult(payload, stat)); + } catch (KeeperException e) { + res.add(new OpResult.ErrorResult(e.code().intValue())); + } + break; + } + default: + } + } + } catch (KeeperException e) { + res.add(new OpResult.ErrorResult(e.code().intValue())); + int total = Iterables.size(ops); + for (int i = res.size(); i < total; i++) { + res.add(new OpResult.ErrorResult(KeeperException.Code.RUNTIMEINCONSISTENCY.intValue())); } } return res; @@ -972,7 +1066,7 @@ public void close() throws InterruptedException { } public void shutdown() throws InterruptedException { - mutex.lock(); + lock(); try { stopped = true; tree.clear(); @@ -984,7 +1078,7 @@ public void shutdown() throws InterruptedException { log.error("MockZooKeeper shutdown had error", ex); } } finally { - mutex.unlock(); + unlockIfLocked(); } } diff --git a/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeperSession.java 
b/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeperSession.java index 499da0e345d44..a33d4483c89f3 100644 --- a/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeperSession.java +++ b/testmocks/src/main/java/org/apache/zookeeper/MockZooKeeperSession.java @@ -44,7 +44,7 @@ public class MockZooKeeperSession extends ZooKeeper { private static final Objenesis objenesis = new ObjenesisStd(); - private static final AtomicInteger sessionIdGenerator = new AtomicInteger(0); + private static final AtomicInteger sessionIdGenerator = new AtomicInteger(1000); public static MockZooKeeperSession newInstance(MockZooKeeper mockZooKeeper) { ObjectInstantiator instantiator = objenesis.getInstantiatorOf(MockZooKeeperSession.class); @@ -80,13 +80,23 @@ public void register(Watcher watcher) { @Override public String create(String path, byte[] data, List acl, CreateMode createMode) throws KeeperException, InterruptedException { - return mockZooKeeper.create(path, data, acl, createMode); + try { + mockZooKeeper.overrideEpheralOwner(getSessionId()); + return mockZooKeeper.create(path, data, acl, createMode); + } finally { + mockZooKeeper.removeEpheralOwnerOverride(); + } } @Override public void create(final String path, final byte[] data, final List acl, CreateMode createMode, final AsyncCallback.StringCallback cb, final Object ctx) { - mockZooKeeper.create(path, data, acl, createMode, cb, ctx); + try { + mockZooKeeper.overrideEpheralOwner(getSessionId()); + mockZooKeeper.create(path, data, acl, createMode, cb, ctx); + } finally { + mockZooKeeper.removeEpheralOwnerOverride(); + } } @Override diff --git a/tests/bc_2_0_0/pom.xml b/tests/bc_2_0_0/pom.xml index 50d1883cc07dd..83a7442aa5a3b 100644 --- a/tests/bc_2_0_0/pom.xml +++ b/tests/bc_2_0_0/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar.tests tests-parent - 2.9.0-SNAPSHOT + 2.9.3 bc_2_0_0 diff --git a/tests/bc_2_0_1/pom.xml b/tests/bc_2_0_1/pom.xml index 10830353ee6d4..a1836f0ad4fed 100644 --- a/tests/bc_2_0_1/pom.xml +++ 
b/tests/bc_2_0_1/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar.tests tests-parent - 2.9.0-SNAPSHOT + 2.9.3 bc_2_0_1 diff --git a/tests/bc_2_6_0/pom.xml b/tests/bc_2_6_0/pom.xml index 2a81b22e597d5..555c5b118c8ee 100644 --- a/tests/bc_2_6_0/pom.xml +++ b/tests/bc_2_6_0/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar.tests tests-parent - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 diff --git a/tests/docker-images/java-test-functions/pom.xml b/tests/docker-images/java-test-functions/pom.xml index 72dff476c39b0..7461043fd7a32 100644 --- a/tests/docker-images/java-test-functions/pom.xml +++ b/tests/docker-images/java-test-functions/pom.xml @@ -23,7 +23,7 @@ org.apache.pulsar.tests docker-images - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 java-test-functions @@ -49,7 +49,6 @@ com.fasterxml.jackson.core jackson-databind - ${jackson.databind.version} provided diff --git a/tests/docker-images/java-test-functions/src/main/java/org/apache/pulsar/tests/integration/io/TestByteStateSource.java b/tests/docker-images/java-test-functions/src/main/java/org/apache/pulsar/tests/integration/io/TestByteStateSource.java new file mode 100644 index 0000000000000..4fe382f5ce758 --- /dev/null +++ b/tests/docker-images/java-test-functions/src/main/java/org/apache/pulsar/tests/integration/io/TestByteStateSource.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.tests.integration.io; + +import java.nio.ByteBuffer; +import java.util.Base64; +import java.util.Map; +import org.apache.pulsar.functions.api.Record; +import org.apache.pulsar.io.core.Source; +import org.apache.pulsar.io.core.SourceContext; + +public class TestByteStateSource implements Source { + + private SourceContext sourceContext; + + public static final String VALUE_BASE64 = "0a8001127e0a172e6576656e74732e437573746f6d65724372656174656412630a243" + + "2336366666263652d623038342d346631352d616565342d326330643135356131666" + + "36312026e311a3700000000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000"; + + @Override + public void open(Map config, SourceContext sourceContext) throws Exception { + sourceContext.putState("initial", ByteBuffer.wrap(Base64.getDecoder().decode(VALUE_BASE64))); + this.sourceContext = sourceContext; + } + + @Override + public Record read() throws Exception { + Thread.sleep(50); + ByteBuffer initial = sourceContext.getState("initial"); + sourceContext.putState("now", initial); + return initial::array; + } + + @Override + public void close() throws Exception { + + } +} \ No newline at end of file diff --git a/tests/docker-images/java-test-image/pom.xml b/tests/docker-images/java-test-image/pom.xml index 7dd26d8040ebb..79b19f3ac9330 100644 --- a/tests/docker-images/java-test-image/pom.xml +++ b/tests/docker-images/java-test-image/pom.xml @@ -23,7 +23,7 @@ org.apache.pulsar.tests docker-images - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 
java-test-image diff --git a/tests/docker-images/latest-version-image/conf/presto/jvm.config b/tests/docker-images/latest-version-image/conf/presto/jvm.config index 28db36a3937da..e8a0cec43ba0e 100644 --- a/tests/docker-images/latest-version-image/conf/presto/jvm.config +++ b/tests/docker-images/latest-version-image/conf/presto/jvm.config @@ -28,3 +28,4 @@ -XX:+ExitOnOutOfMemoryError -Dpresto-temporarily-allow-java8=true -Djdk.attach.allowAttachSelf=true +-javaagent:java-version-trim-agent.jar \ No newline at end of file diff --git a/tests/docker-images/latest-version-image/pom.xml b/tests/docker-images/latest-version-image/pom.xml index 8d0aba77ad47f..d5f17ff3b59cd 100644 --- a/tests/docker-images/latest-version-image/pom.xml +++ b/tests/docker-images/latest-version-image/pom.xml @@ -23,7 +23,7 @@ org.apache.pulsar.tests docker-images - 2.9.0-SNAPSHOT + 2.9.3 4.0.0 latest-version-image diff --git a/tests/docker-images/pom.xml b/tests/docker-images/pom.xml index f68e7d4dbbde7..b68ddad98f8fa 100644 --- a/tests/docker-images/pom.xml +++ b/tests/docker-images/pom.xml @@ -27,7 +27,7 @@ org.apache.pulsar.tests tests-parent - 2.9.0-SNAPSHOT + 2.9.3 docker-images Apache Pulsar :: Tests :: Docker Images diff --git a/tests/integration/pom.xml b/tests/integration/pom.xml index 78382c292984c..97c72032aa1cf 100644 --- a/tests/integration/pom.xml +++ b/tests/integration/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar.tests tests-parent - 2.9.0-SNAPSHOT + 2.9.3 integration @@ -148,12 +148,14 @@ org.elasticsearch.client elasticsearch-rest-high-level-client + test com.rabbitmq amqp-client ${rabbitmq-client.version} + test diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/auth/admin/GetTopicsOfNamespaceWithAuthTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/auth/admin/GetTopicsOfNamespaceWithAuthTest.java new file mode 100644 index 0000000000000..68de70d069273 --- /dev/null +++ 
b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/auth/admin/GetTopicsOfNamespaceWithAuthTest.java @@ -0,0 +1,208 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.tests.integration.auth.admin; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.fail; +import com.google.common.io.Files; +import java.io.File; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import lombok.Cleanup; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.broker.authentication.AuthenticationProviderToken; +import org.apache.pulsar.client.admin.PulsarAdmin; +import org.apache.pulsar.client.admin.PulsarAdminException; +import org.apache.pulsar.client.api.AuthenticationFactory; +import org.apache.pulsar.client.impl.auth.AuthenticationToken; +import org.apache.pulsar.common.policies.data.AuthAction; +import org.apache.pulsar.tests.TestRetrySupport; +import org.apache.pulsar.tests.integration.containers.PulsarContainer; +import org.apache.pulsar.tests.integration.containers.ZKContainer; +import org.apache.pulsar.tests.integration.topologies.PulsarCluster; +import 
org.apache.pulsar.tests.integration.topologies.PulsarClusterSpec; +import org.apache.pulsar.tests.integration.utils.DockerUtils; +import org.elasticsearch.common.collect.Set; +import org.testcontainers.containers.Network; +import org.testcontainers.shaded.org.apache.commons.lang.RandomStringUtils; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +/** + * GetTopicsOfNamespaceWithAuthTest will test GetTopics operation with and without the proper permission. + */ +@Slf4j +public class GetTopicsOfNamespaceWithAuthTest extends TestRetrySupport { + + private static final String CLUSTER_PREFIX = "get-topics-auth"; + private static final String PRIVATE_KEY_PATH_INSIDE_CONTAINER = "/tmp/private.key"; + private static final String PUBLIC_KEY_PATH_INSIDE_CONTAINER = "/tmp/public.key"; + + private static final String SUPER_USER_ROLE = "super-user"; + private String superUserAuthToken; + private static final String PROXY_ROLE = "proxy"; + private String proxyAuthToken; + private static final String REGULAR_USER_ROLE = "client"; + private String clientAuthToken; + private File publicKeyFile; + + private PulsarCluster pulsarCluster; + private PulsarContainer cmdContainer; + + @Override + @BeforeClass(alwaysRun = true) + protected void setup() throws Exception { + incrementSetupNumber(); + // Before starting the cluster, generate the secret key and the token + // Use Zk container to have 1 container available before starting the cluster + final String clusterName = String.format("%s-%s", CLUSTER_PREFIX, RandomStringUtils.randomAlphabetic(6)); + final String cliContainerName = String.format("%s-%s", "cli", RandomStringUtils.randomAlphabetic(6)); + cmdContainer = new ZKContainer<>(cliContainerName); + cmdContainer + .withNetwork(Network.newNetwork()) + .withNetworkAliases(ZKContainer.NAME) + .withEnv("zkServers", ZKContainer.NAME); + cmdContainer.start(); + + createKeysAndTokens(cmdContainer); + + 
PulsarClusterSpec spec = PulsarClusterSpec.builder() + .numBookies(2) + .numBrokers(2) + .numProxies(1) + .clusterName(clusterName) + .brokerEnvs(getBrokerSettingsEnvs()) + .proxyEnvs(getProxySettingsEnvs()) + .brokerMountFiles(Collections.singletonMap(publicKeyFile.toString(), PUBLIC_KEY_PATH_INSIDE_CONTAINER)) + .proxyMountFiles(Collections.singletonMap(publicKeyFile.toString(), PUBLIC_KEY_PATH_INSIDE_CONTAINER)) + .build(); + + pulsarCluster = PulsarCluster.forSpec(spec); + pulsarCluster.start(); + } + + @Override + @AfterClass(alwaysRun = true) + public void cleanup() { + markCurrentSetupNumberCleaned(); + if (cmdContainer != null) { + cmdContainer.stop(); + } + if (pulsarCluster != null) { + pulsarCluster.stop(); + } + } + + private Map getBrokerSettingsEnvs() { + Map envs = new HashMap<>(); + envs.put("authenticationEnabled", "true"); + envs.put("authenticationProviders", AuthenticationProviderToken.class.getName()); + envs.put("authorizationEnabled", "true"); + envs.put("superUserRoles", String.format("%s,%s", SUPER_USER_ROLE, PROXY_ROLE)); + envs.put("brokerClientAuthenticationPlugin", AuthenticationToken.class.getName()); + envs.put("brokerClientAuthenticationParameters", String.format("token:%s", superUserAuthToken)); + envs.put("authenticationRefreshCheckSeconds", "1"); + envs.put("authenticateOriginalAuthData", "true"); + envs.put("tokenPublicKey", "file://" + PUBLIC_KEY_PATH_INSIDE_CONTAINER); + return envs; + } + + private Map getProxySettingsEnvs() { + Map envs = new HashMap<>(); + envs.put("authenticationEnabled", "true"); + envs.put("authenticationProviders", AuthenticationProviderToken.class.getName()); + envs.put("authorizationEnabled", "true"); + envs.put("brokerClientAuthenticationPlugin", AuthenticationToken.class.getName()); + envs.put("brokerClientAuthenticationParameters", String.format("token:%s", proxyAuthToken)); + envs.put("authenticationRefreshCheckSeconds", "1"); + envs.put("forwardAuthorizationCredentials", "true"); + 
envs.put("tokenPublicKey", "file://" + PUBLIC_KEY_PATH_INSIDE_CONTAINER); + return envs; + } + + protected void createKeysAndTokens(PulsarContainer container) throws Exception { + container + .execCmd(PulsarCluster.PULSAR_COMMAND_SCRIPT, "tokens", "create-key-pair", + "--output-private-key", PRIVATE_KEY_PATH_INSIDE_CONTAINER, + "--output-public-key", PUBLIC_KEY_PATH_INSIDE_CONTAINER); + + byte[] publicKeyBytes = DockerUtils + .runCommandWithRawOutput(container.getDockerClient(), container.getContainerId(), + "/bin/cat", PUBLIC_KEY_PATH_INSIDE_CONTAINER) + .getStdout(); + + publicKeyFile = File.createTempFile("public-", ".key", new File("/tmp")); + Files.write(publicKeyBytes, publicKeyFile); + + clientAuthToken = container + .execCmd(PulsarCluster.PULSAR_COMMAND_SCRIPT, "tokens", "create", + "--private-key", "file://" + PRIVATE_KEY_PATH_INSIDE_CONTAINER, + "--subject", REGULAR_USER_ROLE) + .getStdout().trim(); + log.info("Created client token: {}", clientAuthToken); + + superUserAuthToken = container + .execCmd(PulsarCluster.PULSAR_COMMAND_SCRIPT, "tokens", "create", + "--private-key", "file://" + PRIVATE_KEY_PATH_INSIDE_CONTAINER, + "--subject", SUPER_USER_ROLE) + .getStdout().trim(); + log.info("Created super-user token: {}", superUserAuthToken); + + proxyAuthToken = container + .execCmd(PulsarCluster.PULSAR_COMMAND_SCRIPT, "tokens", "create", + "--private-key", "file://" + PRIVATE_KEY_PATH_INSIDE_CONTAINER, + "--subject", PROXY_ROLE) + .getStdout().trim(); + log.info("Created proxy token: {}", proxyAuthToken); + } + + @Test + public void testGetTopicsOfNamespaceOpsWithConsumePermission() throws Exception { + @Cleanup + PulsarAdmin superUserAdmin = PulsarAdmin.builder() + .serviceHttpUrl(pulsarCluster.getHttpServiceUrl()) + .authentication(AuthenticationFactory.token(superUserAuthToken)) + .build(); + + @Cleanup + PulsarAdmin clientAdmin = PulsarAdmin.builder() + .serviceHttpUrl(pulsarCluster.getHttpServiceUrl()) + 
.authentication(AuthenticationFactory.token(clientAuthToken)) + .build(); + + // do some operation without grant any permissions + try { + clientAdmin.namespaces().getTopics("public/default"); + fail("list topics operation should fail because the client hasn't permission to do"); + } catch (PulsarAdminException e) { + assertEquals(e.getStatusCode(), 401); + } + + // grant consume permission to the role + superUserAdmin.namespaces().grantPermissionOnNamespace("public/default", + REGULAR_USER_ROLE, Set.of(AuthAction.consume)); + + // then do some get topics operations again, it should success + List topics = clientAdmin.namespaces().getTopics("public/default"); + assertEquals(topics.size(), 0); + } +} diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/CLITest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/CLITest.java index bd712ca4921c2..f4dc1c8ecf07d 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/CLITest.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/cli/CLITest.java @@ -334,7 +334,7 @@ public void testSchemaCLI() throws Exception { ); fail("Command should have exited with non-zero"); } catch (ContainerExecException e) { - assertTrue(e.getResult().getStderr().contains("Reason: HTTP 404 Not Found")); + assertTrue(e.getResult().getStderr().contains("Schema not found")); } } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/BKContainer.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/BKContainer.java index 36f17cd35cd0b..b294cac4e701e 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/BKContainer.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/containers/BKContainer.java @@ -28,5 +28,6 @@ public class BKContainer extends PulsarContainer { public BKContainer(String clusterName, String 
hostName) { super( clusterName, hostName, hostName, "bin/run-bookie.sh", BOOKIE_PORT, INVALID_PORT); + tailContainerLog(); } } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarFunctionsTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarFunctionsTest.java index 87357a04cf6fb..ad97740c80895 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarFunctionsTest.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarFunctionsTest.java @@ -120,7 +120,7 @@ protected Map produceMessagesToInputTopic(String inputTopicName, return kvs; } - public void testFunctionLocalRun(Runtime runtime) throws Exception { + protected void testFunctionLocalRun(Runtime runtime) throws Exception { if (functionRuntimeType == FunctionRuntimeType.THREAD) { return; } @@ -243,7 +243,7 @@ public void testFunctionLocalRun(Runtime runtime) throws Exception { } - public void testWindowFunction(String type, String[] expectedResults) throws Exception { + protected void testWindowFunction(String type, String[] expectedResults) throws Exception { int NUM_OF_MESSAGES = 100; int windowLengthCount = 10; int slidingIntervalCount = 5; diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarStateTest.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarStateTest.java index c5fb6ecffebff..5b9041b0916c7 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarStateTest.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/functions/PulsarStateTest.java @@ -18,6 +18,16 @@ */ package org.apache.pulsar.tests.integration.functions; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.pulsar.tests.integration.functions.utils.CommandGenerator.JAVAJAR; +import static 
org.testng.Assert.assertEquals; +import static org.testng.Assert.assertFalse; +import static org.testng.Assert.assertNotEquals; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; +import com.google.common.base.Utf8; +import java.util.Base64; import lombok.Cleanup; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; @@ -40,13 +50,6 @@ import org.awaitility.Awaitility; import org.testng.annotations.Test; -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.pulsar.tests.integration.functions.utils.CommandGenerator.JAVAJAR; -import static org.apache.pulsar.tests.integration.suites.PulsarTestSuite.retryStrategically; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertTrue; -import static org.testng.Assert.fail; - /** * State related test cases. */ @@ -58,6 +61,11 @@ public class PulsarStateTest extends PulsarStandaloneTestSuite { public static final String WORDCOUNT_PYTHON_FILE = "wordcount_function.py"; + public static final String VALUE_BASE64 = "0a8001127e0a172e6576656e74732e437573746f6d65724372656174656412630a243" + + "2336366666263652d623038342d346631352d616565342d326330643135356131666" + + "36312026e311a3700000000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000"; + @Test(groups = {"python_state", "state", "function", "python_function"}) public void testPythonWordCountFunction() throws Exception { String inputTopicName = "test-wordcount-py-input-" + randomName(8); @@ -184,6 +192,54 @@ public void testSinkState() throws Exception { getSinkInfoNotFound(sinkName); } + @Test(groups = {"java_state", "state", "function", "java_function"}) + public void testBytes2StringNotUTF8() { + byte[] valueBytes = Base64.getDecoder().decode(VALUE_BASE64); + assertFalse(Utf8.isWellFormed(valueBytes)); + assertNotEquals(valueBytes, new String(valueBytes, 
UTF_8).getBytes(UTF_8)); + } + + @Test(groups = {"java_state", "state", "function", "java_function"}) + public void testSourceByteState() throws Exception { + String outputTopicName = "test-state-source-output-" + randomName(8); + String sourceName = "test-state-source-" + randomName(8); + + submitSourceConnector(sourceName, outputTopicName, "org.apache.pulsar.tests.integration.io.TestByteStateSource", JAVAJAR); + + // get source info + getSourceInfoSuccess(sourceName); + + // get source status + getSourceStatus(sourceName); + + try (PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl(container.getHttpServiceUrl()).build()) { + + Awaitility.await().ignoreExceptions().untilAsserted(() -> { + SourceStatus status = admin.sources().getSourceStatus("public", "default", sourceName); + assertEquals(status.getInstances().size(), 1); + assertTrue(status.getInstances().get(0).getStatus().numWritten > 0); + }); + + { + FunctionState functionState = + admin.functions().getFunctionState("public", "default", sourceName, "initial"); + assertNull(functionState.getStringValue()); + assertEquals(functionState.getByteValue(), Base64.getDecoder().decode(VALUE_BASE64)); + } + + Awaitility.await().ignoreExceptions().untilAsserted(() -> { + FunctionState functionState = admin.functions().getFunctionState("public", "default", sourceName, "now"); + assertNull(functionState.getStringValue()); + assertEquals(functionState.getByteValue(), Base64.getDecoder().decode(VALUE_BASE64)); + }); + } + + // delete source + deleteSource(sourceName); + + getSourceInfoNotFound(sourceName); + } + private void submitSourceConnector(String sourceName, String outputTopicName, String className, diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sources/debezium/DebeziumMySqlSourceTester.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sources/debezium/DebeziumMySqlSourceTester.java index 3cb64db8a7de0..7958fa019925f 100644 --- 
a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sources/debezium/DebeziumMySqlSourceTester.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/io/sources/debezium/DebeziumMySqlSourceTester.java @@ -49,7 +49,8 @@ public class DebeziumMySqlSourceTester extends SourceTester { + try { + admin.topics().createPartitionedTopic(topic, 10); + } catch (Exception e) { + log.error("Failed to create partitioned topic {}.", topic, e); + Assert.fail("Failed to create partitioned topic " + topic); + } + Assert.assertEquals(admin.topics().getPartitionedTopicMetadata(topic).partitions, 10); + }); + log.info("Test geo-replication produce and consume for topic {}.", topic); + + @Cleanup + PulsarClient client1 = PulsarClient.builder() + .serviceUrl(getGeoCluster().getClusters()[0].getPlainTextServiceUrl()) + .build(); + + @Cleanup + PulsarClient client2 = PulsarClient.builder() + .serviceUrl(getGeoCluster().getClusters()[1].getPlainTextServiceUrl()) + .build(); + + @Cleanup + Producer p = client1.newProducer() + .topic(topic) + .create(); + log.info("Successfully create producer in cluster {} for topic {}.", cluster1, topic); + + @Cleanup + Consumer c = client2.newConsumer() + .topic(topic) + .subscriptionName("geo-sub") + .subscribe(); + log.info("Successfully create consumer in cluster {} for topic {}.", cluster2, topic); + + for (int i = 0; i < 10; i++) { + p.send(String.format("Message [%d]", i).getBytes(StandardCharsets.UTF_8)); + } + log.info("Successfully produce message to cluster {} for topic {}.", cluster1, topic); + + for (int i = 0; i < 10; i++) { + Message message = c.receive(10, TimeUnit.SECONDS); + Assert.assertNotNull(message); + } + log.info("Successfully consume message from cluster {} for topic {}.", cluster2, topic); + } +} diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestBaseOffload.java 
b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestBaseOffload.java index ad7a8fdf1d048..f5a320e582072 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestBaseOffload.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/offload/TestBaseOffload.java @@ -48,7 +48,7 @@ private static byte[] buildEntry(String pattern) { return entry; } - public void testPublishOffloadAndConsumeViaCLI(String serviceUrl, String adminUrl) throws Exception { + protected void testPublishOffloadAndConsumeViaCLI(String serviceUrl, String adminUrl) throws Exception { final String tenant = "offload-test-cli-" + randomName(4); final String namespace = tenant + "/ns1"; final String topic = "persistent://" + namespace + "/topic1"; @@ -120,7 +120,7 @@ public void testPublishOffloadAndConsumeViaCLI(String serviceUrl, String adminUr } } - public void testPublishOffloadAndConsumeViaThreshold(String serviceUrl, String adminUrl) throws Exception { + protected void testPublishOffloadAndConsumeViaThreshold(String serviceUrl, String adminUrl) throws Exception { final String tenant = "offload-test-threshold-" + randomName(4); final String namespace = tenant + "/ns1"; final String topic = "persistent://" + namespace + "/topic1"; @@ -240,7 +240,7 @@ public boolean ledgerExistsInBookKeeper(long ledgerId) throws Exception { } } - public void testPublishOffloadAndConsumeDeletionLag(String serviceUrl, String adminUrl) throws Exception { + protected void testPublishOffloadAndConsumeDeletionLag(String serviceUrl, String adminUrl) throws Exception { final String tenant = "offload-test-deletion-lag-" + randomName(4); final String namespace = tenant + "/ns1"; final String topic = "persistent://" + namespace + "/topic1"; diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestBasicPresto.java 
b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestBasicPresto.java index 91043904076be..62f59c3f36f58 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestBasicPresto.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestBasicPresto.java @@ -27,7 +27,6 @@ import lombok.extern.slf4j.Slf4j; import org.apache.pulsar.client.api.CompressionType; import org.apache.pulsar.client.api.Producer; -import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.impl.schema.AvroSchema; @@ -35,6 +34,7 @@ import org.apache.pulsar.client.impl.schema.KeyValueSchemaImpl; import org.apache.pulsar.client.impl.schema.ProtobufNativeSchema; import org.apache.pulsar.common.naming.TopicName; +import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.common.schema.KeyValue; import org.apache.pulsar.common.schema.KeyValueEncodingType; import org.apache.pulsar.common.schema.SchemaType; @@ -56,6 +56,7 @@ public class TestBasicPresto extends TestPulsarSQLBase { private void setupPresto() throws Exception { log.info("[TestBasicPresto] setupPresto..."); pulsarCluster.startPrestoWorker(); + initJdbcConnection(); } private void teardownPresto() { @@ -161,31 +162,26 @@ protected int prepareData(TopicName topicName, boolean useNsOffloadPolices, Schema schema, CompressionType compressionType) throws Exception { - @Cleanup - PulsarClient pulsarClient = PulsarClient.builder() - .serviceUrl(pulsarCluster.getPlainTextServiceUrl()) - .build(); if (schema.getSchemaInfo().getName().equals(Schema.BYTES.getSchemaInfo().getName())) { - prepareDataForBytesSchema(pulsarClient, topicName, isBatch, compressionType); + prepareDataForBytesSchema(topicName, isBatch, compressionType); } else if 
(schema.getSchemaInfo().getName().equals(Schema.BYTEBUFFER.getSchemaInfo().getName())) { - prepareDataForByteBufferSchema(pulsarClient, topicName, isBatch, compressionType); + prepareDataForByteBufferSchema(topicName, isBatch, compressionType); } else if (schema.getSchemaInfo().getType().equals(SchemaType.STRING)) { - prepareDataForStringSchema(pulsarClient, topicName, isBatch, compressionType); + prepareDataForStringSchema(topicName, isBatch, compressionType); } else if (schema.getSchemaInfo().getType().equals(SchemaType.JSON) || schema.getSchemaInfo().getType().equals(SchemaType.AVRO)) { - prepareDataForStructSchema(pulsarClient, topicName, isBatch, schema, compressionType); + prepareDataForStructSchema(topicName, isBatch, schema, compressionType); } else if (schema.getSchemaInfo().getType().equals(SchemaType.PROTOBUF_NATIVE)) { - prepareDataForProtobufNativeSchema(pulsarClient, topicName, isBatch, schema, compressionType); + prepareDataForProtobufNativeSchema(topicName, isBatch, schema, compressionType); } else if (schema.getSchemaInfo().getType().equals(SchemaType.KEY_VALUE)) { - prepareDataForKeyValueSchema(pulsarClient, topicName, schema, compressionType); + prepareDataForKeyValueSchema(topicName, schema, compressionType); } return NUM_OF_STOCKS; } - private void prepareDataForBytesSchema(PulsarClient pulsarClient, - TopicName topicName, + private void prepareDataForBytesSchema(TopicName topicName, boolean isBatch, CompressionType compressionType) throws PulsarClientException { @Cleanup @@ -201,8 +197,7 @@ private void prepareDataForBytesSchema(PulsarClient pulsarClient, producer.flush(); } - private void prepareDataForByteBufferSchema(PulsarClient pulsarClient, - TopicName topicName, + private void prepareDataForByteBufferSchema(TopicName topicName, boolean isBatch, CompressionType compressionType) throws PulsarClientException { @Cleanup @@ -218,8 +213,7 @@ private void prepareDataForByteBufferSchema(PulsarClient pulsarClient, producer.flush(); } - private 
void prepareDataForStringSchema(PulsarClient pulsarClient, - TopicName topicName, + private void prepareDataForStringSchema(TopicName topicName, boolean isBatch, CompressionType compressionType) throws PulsarClientException { @Cleanup @@ -235,8 +229,7 @@ private void prepareDataForStringSchema(PulsarClient pulsarClient, producer.flush(); } - private void prepareDataForStructSchema(PulsarClient pulsarClient, - TopicName topicName, + private void prepareDataForStructSchema(TopicName topicName, boolean isBatch, Schema schema, CompressionType compressionType) throws Exception { @@ -254,8 +247,7 @@ private void prepareDataForStructSchema(PulsarClient pulsarClient, producer.flush(); } - private void prepareDataForProtobufNativeSchema(PulsarClient pulsarClient, - TopicName topicName, + private void prepareDataForProtobufNativeSchema(TopicName topicName, boolean isBatch, Schema schema, CompressionType compressionType) throws Exception { @@ -274,8 +266,7 @@ private void prepareDataForProtobufNativeSchema(PulsarClient pulsarClient, producer.flush(); } - private void prepareDataForKeyValueSchema(PulsarClient pulsarClient, - TopicName topicName, + private void prepareDataForKeyValueSchema(TopicName topicName, Schema> schema, CompressionType compressionType) throws Exception { @Cleanup @@ -342,4 +333,33 @@ private void validateContentForKeyValueSchema(int messageNum, String[] contentAr } } + @Test(timeOut = 1000 * 30) + public void testQueueBigEntry() throws Exception { + String tableName = "big_data_" + randomName(5); + String topic = "persistent://public/default/" + tableName; + + @Cleanup + Producer producer = pulsarClient.newProducer(Schema.BYTES) + .topic(topic) + .enableBatching(false) + .create(); + + // Make sure that the data length bigger than the default maxMessageSize + int dataLength = Commands.DEFAULT_MAX_MESSAGE_SIZE + 2 * 1024 * 1024; + Assert.assertTrue(dataLength < pulsarCluster.getSpec().maxMessageSize()); + byte[] data = new byte[dataLength]; + for (int i = 
0; i < dataLength; i++) { + data[i] = 'a'; + } + + int messageCnt = 5; + log.info("start produce big entry data, data length: {}", dataLength); + for (int i = 0 ; i < messageCnt; ++i) { + producer.newMessage().value(data).send(); + } + + int count = selectCount("public/default", tableName); + Assert.assertEquals(count, messageCnt); + } + } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPrestoQueryTieredStorage.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPrestoQueryTieredStorage.java index 881dbe4bd0d41..7e4aae47bf0d5 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPrestoQueryTieredStorage.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPrestoQueryTieredStorage.java @@ -29,7 +29,6 @@ import org.apache.pulsar.client.api.CompressionType; import org.apache.pulsar.client.api.Consumer; import org.apache.pulsar.client.api.Producer; -import org.apache.pulsar.client.api.PulsarClient; import org.apache.pulsar.client.api.Schema; import org.apache.pulsar.client.api.SubscriptionInitialPosition; import org.apache.pulsar.client.impl.MessageIdImpl; @@ -40,11 +39,8 @@ import org.apache.pulsar.tests.integration.containers.S3Container; import org.testcontainers.shaded.org.apache.commons.lang.StringUtils; import org.testng.Assert; -import org.testng.annotations.AfterClass; -import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; - /** * Test presto query from tiered storage, the Pulsar SQL is cluster mode. 
*/ @@ -89,6 +85,7 @@ private void setupExtraContainers() throws Exception { String offloadProperties = getOffloadProperties(BUCKET, null, ENDPOINT); pulsarCluster.startPrestoWorker(OFFLOAD_DRIVER, offloadProperties); pulsarCluster.startPrestoFollowWorkers(1, OFFLOAD_DRIVER, offloadProperties); + initJdbcConnection(); } private String getOffloadProperties(String bucket, String region, String endpoint) { @@ -136,11 +133,6 @@ protected int prepareData(TopicName topicName, Schema schema, CompressionType compressionType) throws Exception { @Cleanup - PulsarClient pulsarClient = PulsarClient.builder() - .serviceUrl(pulsarCluster.getPlainTextServiceUrl()) - .build(); - - @Cleanup Consumer consumer = pulsarClient.newConsumer(JSONSchema.of(Stock.class)) .topic(topicName.toString()) .subscriptionName("test") diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPulsarSQLBase.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPulsarSQLBase.java index 026a32d1c8ec6..0626e3522e8c5 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPulsarSQLBase.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/presto/TestPulsarSQLBase.java @@ -21,8 +21,6 @@ import static org.assertj.core.api.Assertions.assertThat; import com.google.common.base.Stopwatch; -import java.sql.Connection; -import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; @@ -196,9 +194,6 @@ private void validateData(TopicName topicName, int messageNum, Schema schema) th ); // test predicate pushdown - String url = String.format("jdbc:presto://%s", pulsarCluster.getPrestoWorkerContainer().getUrl()); - Connection connection = DriverManager.getConnection(url, "test", null); - String query = String.format("select * from pulsar" + ".\"%s\".\"%s\" order by __publish_time__", namespace, topic); log.info("Executing query: {}", 
query); @@ -267,11 +262,7 @@ private void validateData(TopicName topicName, int messageNum, Schema schema) th log.info("Executing query: result for topic {} returnedTimestamps size: {}", topic, returnedTimestamps.size()); assertThat(returnedTimestamps.size()).isEqualTo(0); - query = String.format("select count(*) from pulsar.\"%s\".\"%s\"", namespace, topic); - log.info("Executing query: {}", query); - res = connection.createStatement().executeQuery(query); - res.next(); - int count = res.getInt("_col0"); + int count = selectCount(namespace, topic); assertThat(count).isGreaterThan(messageNum - 2); } @@ -304,5 +295,12 @@ private static void printCurrent(ResultSet rs) throws SQLException { } + protected int selectCount(String namespace, String tableName) throws SQLException { + String query = String.format("select count(*) from pulsar.\"%s\".\"%s\"", namespace, tableName); + log.info("Executing count query: {}", query); + ResultSet res = connection.createStatement().executeQuery(query); + res.next(); + return res.getInt("_col0"); + } } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/suites/PulsarSQLTestSuite.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/suites/PulsarSQLTestSuite.java index 9ed733570452f..902de4dd4ad8c 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/suites/PulsarSQLTestSuite.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/suites/PulsarSQLTestSuite.java @@ -18,13 +18,23 @@ */ package org.apache.pulsar.tests.integration.suites; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; import java.util.HashMap; import java.util.Map; + import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.client.api.PulsarClient; +import org.apache.pulsar.client.api.PulsarClientException; +import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.tests.integration.containers.BrokerContainer; 
import org.apache.pulsar.tests.integration.containers.S3Container; import org.apache.pulsar.tests.integration.topologies.PulsarClusterSpec; +/** + * Pulsar SQL test suite. + */ @Slf4j public abstract class PulsarSQLTestSuite extends PulsarTestSuite { @@ -33,11 +43,15 @@ public abstract class PulsarSQLTestSuite extends PulsarTestSuite { public static final String BUCKET = "pulsar-integtest"; public static final String ENDPOINT = "http://" + S3Container.NAME + ":9090"; + protected Connection connection = null; + protected PulsarClient pulsarClient = null; + @Override protected PulsarClusterSpec.PulsarClusterSpecBuilder beforeSetupCluster(String clusterName, PulsarClusterSpec.PulsarClusterSpecBuilder specBuilder) { specBuilder.queryLastMessage(true); specBuilder.clusterName("pulsar-sql-test"); specBuilder.numBrokers(1); + specBuilder.maxMessageSize(2 * Commands.DEFAULT_MAX_MESSAGE_SIZE); return super.beforeSetupCluster(clusterName, specBuilder); } @@ -55,4 +69,43 @@ protected void beforeStartCluster() throws Exception { } } + @Override + public void setupCluster() throws Exception { + super.setupCluster(); + pulsarClient = PulsarClient.builder() + .serviceUrl(pulsarCluster.getPlainTextServiceUrl()) + .build(); + } + + protected void initJdbcConnection() throws SQLException { + if (pulsarCluster.getPrestoWorkerContainer() == null) { + log.error("The presto work container isn't exist."); + return; + } + String url = String.format("jdbc:presto://%s", pulsarCluster.getPrestoWorkerContainer().getUrl()); + connection = DriverManager.getConnection(url, "test", null); + } + + @Override + public void tearDownCluster() throws Exception { + close(); + super.tearDownCluster(); + } + + protected void close() { + if (connection != null) { + try { + connection.close(); + } catch (SQLException e) { + log.error("Failed to close sql connection.", e); + } + } + if (pulsarClient != null) { + try { + pulsarClient.close(); + } catch (PulsarClientException e) { + log.error("Failed to close 
pulsar client.", e); + } + } + } } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java index 043762ce1f761..7117cbb1f9e36 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarCluster.java @@ -70,9 +70,17 @@ public class PulsarCluster { * @return the built pulsar cluster */ public static PulsarCluster forSpec(PulsarClusterSpec spec) { - return new PulsarCluster(spec); + CSContainer csContainer = new CSContainer(spec.clusterName) + .withNetwork(Network.newNetwork()) + .withNetworkAliases(CSContainer.NAME); + return new PulsarCluster(spec, csContainer, false); } + public static PulsarCluster forSpec(PulsarClusterSpec spec, CSContainer csContainer) { + return new PulsarCluster(spec, csContainer, true); + } + + @Getter private final PulsarClusterSpec spec; @Getter @@ -80,6 +88,7 @@ public static PulsarCluster forSpec(PulsarClusterSpec spec) { private final Network network; private final ZKContainer zkContainer; private final CSContainer csContainer; + private final boolean sharedCsContainer; private final Map bookieContainers; private final Map brokerContainers; private final Map workerContainers; @@ -90,11 +99,12 @@ public static PulsarCluster forSpec(PulsarClusterSpec spec) { private Map> externalServices = Collections.emptyMap(); private final boolean enablePrestoWorker; - private PulsarCluster(PulsarClusterSpec spec) { + private PulsarCluster(PulsarClusterSpec spec, CSContainer csContainer, boolean sharedCsContainer) { this.spec = spec; + this.sharedCsContainer = sharedCsContainer; this.clusterName = spec.clusterName(); - this.network = Network.newNetwork(); + this.network = csContainer.getNetwork(); this.enablePrestoWorker = spec.enablePrestoWorker(); 
this.sqlFollowWorkerContainers = Maps.newTreeMap(); @@ -109,26 +119,24 @@ private PulsarCluster(PulsarClusterSpec spec) { this.zkContainer = new ZKContainer(clusterName); this.zkContainer .withNetwork(network) - .withNetworkAliases(ZKContainer.NAME) + .withNetworkAliases(appendClusterName(ZKContainer.NAME)) .withEnv("clusterName", clusterName) - .withEnv("zkServers", ZKContainer.NAME) + .withEnv("zkServers", appendClusterName(ZKContainer.NAME)) .withEnv("configurationStore", CSContainer.NAME + ":" + CS_PORT) .withEnv("forceSync", "no") - .withEnv("pulsarNode", "pulsar-broker-0"); + .withEnv("pulsarNode", appendClusterName("pulsar-broker-0")); - this.csContainer = new CSContainer(clusterName) - .withNetwork(network) - .withNetworkAliases(CSContainer.NAME); + this.csContainer = csContainer; this.bookieContainers = Maps.newTreeMap(); this.brokerContainers = Maps.newTreeMap(); this.workerContainers = Maps.newTreeMap(); - this.proxyContainer = new ProxyContainer(clusterName, ProxyContainer.NAME) + this.proxyContainer = new ProxyContainer(appendClusterName("pulsar-proxy"), ProxyContainer.NAME) .withNetwork(network) - .withNetworkAliases("pulsar-proxy") - .withEnv("zkServers", ZKContainer.NAME) - .withEnv("zookeeperServers", ZKContainer.NAME) + .withNetworkAliases(appendClusterName("pulsar-proxy")) + .withEnv("zkServers", appendClusterName(ZKContainer.NAME)) + .withEnv("zookeeperServers", appendClusterName(ZKContainer.NAME)) .withEnv("configurationStoreServers", CSContainer.NAME + ":" + CS_PORT) .withEnv("clusterName", clusterName); if (spec.proxyEnvs != null) { @@ -142,31 +150,33 @@ private PulsarCluster(PulsarClusterSpec spec) { bookieContainers.putAll( runNumContainers("bookie", spec.numBookies(), (name) -> new BKContainer(clusterName, name) .withNetwork(network) - .withNetworkAliases(name) - .withEnv("zkServers", ZKContainer.NAME) + .withNetworkAliases(appendClusterName(name)) + .withEnv("zkServers", appendClusterName(ZKContainer.NAME)) 
.withEnv("useHostNameAsBookieID", "true") // Disable fsyncs for tests since they're slow within the containers .withEnv("journalSyncData", "false") .withEnv("journalMaxGroupWaitMSec", "0") .withEnv("clusterName", clusterName) .withEnv("diskUsageThreshold", "0.99") + .withEnv("nettyMaxFrameSizeBytes", "" + spec.maxMessageSize) ) ); // create brokers brokerContainers.putAll( runNumContainers("broker", spec.numBrokers(), (name) -> { - BrokerContainer brokerContainer = new BrokerContainer(clusterName, name) + BrokerContainer brokerContainer = new BrokerContainer(clusterName, appendClusterName(name)) .withNetwork(network) - .withNetworkAliases(name) - .withEnv("zkServers", ZKContainer.NAME) - .withEnv("zookeeperServers", ZKContainer.NAME) + .withNetworkAliases(appendClusterName(name)) + .withEnv("zkServers", appendClusterName(ZKContainer.NAME)) + .withEnv("zookeeperServers", appendClusterName(ZKContainer.NAME)) .withEnv("configurationStoreServers", CSContainer.NAME + ":" + CS_PORT) .withEnv("clusterName", clusterName) .withEnv("brokerServiceCompactionMonitorIntervalInSeconds", "1") // used in s3 tests .withEnv("AWS_ACCESS_KEY_ID", "accesskey") - .withEnv("AWS_SECRET_KEY", "secretkey"); + .withEnv("AWS_SECRET_KEY", "secretkey") + .withEnv("maxMessageSize", "" + spec.maxMessageSize); if (spec.queryLastMessage) { brokerContainer.withEnv("bookkeeperExplicitLacIntervalInMills", "10"); brokerContainer.withEnv("bookkeeperUseV2WireProtocol", "false"); @@ -235,8 +245,10 @@ public void start() throws Exception { log.info("Successfully started local zookeeper container."); // start the configuration store - csContainer.start(); - log.info("Successfully started configuration store container."); + if (!sharedCsContainer) { + csContainer.start(); + log.info("Successfully started configuration store container."); + } // init the cluster zkContainer.execCmd( @@ -335,9 +347,11 @@ public synchronized void stop() { if (null != proxyContainer) { containers.add(proxyContainer); } - if (null 
!= csContainer) { + + if (!sharedCsContainer && null != csContainer) { containers.add(csContainer); } + if (null != zkContainer) { containers.add(zkContainer); } @@ -420,6 +434,7 @@ private PrestoWorkerContainer buildPrestoWorkerContainer(String hostName, boolea .withEnv("zookeeperServers", ZKContainer.NAME + ":" + ZKContainer.ZK_PORT) .withEnv("pulsar.zookeeper-uri", ZKContainer.NAME + ":" + ZKContainer.ZK_PORT) .withEnv("pulsar.web-service-url", "http://pulsar-broker-0:8080") + .withEnv("SQL_PREFIX_pulsar.max-message-size", "" + spec.maxMessageSize) .withClasspathResourceMapping( resourcePath, "/pulsar/conf/presto/config.properties", BindMode.READ_WRITE); if (spec.queryLastMessage) { @@ -668,4 +683,8 @@ public void dumpFunctionLogs(String name) { } } } + + private String appendClusterName(String name) { + return sharedCsContainer ? clusterName + "-" + name : name; + } } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java index 9dcfcfb725e79..eed604205bdce 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarClusterSpec.java @@ -29,6 +29,7 @@ import lombok.Singular; import lombok.experimental.Accessors; +import org.apache.pulsar.common.protocol.Commands; import org.apache.pulsar.tests.integration.containers.PulsarContainer; import org.testcontainers.containers.GenericContainer; @@ -152,4 +153,7 @@ public class PulsarClusterSpec { * Specify mount files. 
*/ Map brokerMountFiles; + + @Default + int maxMessageSize = Commands.DEFAULT_MAX_MESSAGE_SIZE; } diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarGeoCluster.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarGeoCluster.java new file mode 100644 index 0000000000000..9be3c382b7035 --- /dev/null +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarGeoCluster.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.tests.integration.topologies; + +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +import org.apache.pulsar.tests.integration.containers.CSContainer; +import org.testcontainers.containers.Network; + +@Slf4j +public class PulsarGeoCluster { + + @Getter + private final PulsarClusterSpec[] clusterSpecs; + + @Getter + private final CSContainer csContainer; + + @Getter + private final PulsarCluster[] clusters; + + /** + * Pulsar Cluster Spec + * + * @param specs each pulsar cluster spec. + * @return the built a pulsar cluster with geo replication + */ + public static PulsarGeoCluster forSpec(PulsarClusterSpec... 
specs) { + return new PulsarGeoCluster(specs); + } + + public PulsarGeoCluster(PulsarClusterSpec... clusterSpecs) { + this.clusterSpecs = clusterSpecs; + this.clusters = new PulsarCluster[clusterSpecs.length]; + + this.csContainer = new CSContainer("geo-cluster") + .withNetwork(Network.newNetwork()) + .withNetworkAliases(CSContainer.NAME); + + for (int i = 0; i < this.clusters.length; i++) { + clusters[i] = PulsarCluster.forSpec(this.clusterSpecs[i], this.csContainer); + } + } + + public void start() throws Exception { + // start the configuration store + this.csContainer.start(); + log.info("Successfully started configuration store container."); + + for (PulsarCluster cluster : clusters) { + cluster.start(); + log.info("Successfully started all components for cluster {}.", cluster.getClusterName()); + } + } + + public void stop() throws Exception { + for (PulsarCluster cluster : clusters) { + cluster.stop(); + log.info("Successfully stopped all components for cluster {}.", cluster.getClusterName()); + } + // stop the configuration store + this.csContainer.stop(); + log.info("Successfully stopped configuration store container."); + } + +} diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarGeoClusterTestBase.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarGeoClusterTestBase.java new file mode 100644 index 0000000000000..51c74eee50b18 --- /dev/null +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarGeoClusterTestBase.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pulsar.tests.integration.topologies; + +import static java.util.stream.Collectors.joining; +import java.util.stream.Stream; + +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class PulsarGeoClusterTestBase extends PulsarTestBase { + + @Override + protected final void setup() throws Exception { + setupCluster(); + } + + @Override + protected final void cleanup() throws Exception { + tearDownCluster(); + } + + protected void setupCluster() throws Exception { + this.setupCluster(""); + } + + @Getter + private PulsarGeoCluster geoCluster; + + public void setupCluster(String namePrefix) throws Exception { + PulsarClusterSpec.PulsarClusterSpecBuilder[] specBuilders = new PulsarClusterSpec.PulsarClusterSpecBuilder[2]; + for (int i = 0; i < 2; i++) { + String clusterName = Stream.of(this.getClass().getSimpleName(), namePrefix, String.valueOf(i), + randomName(5)) + .filter(s -> s != null && !s.isEmpty()) + .collect(joining("-")); + specBuilders[i] = PulsarClusterSpec.builder().clusterName(clusterName); + } + specBuilders = beforeSetupCluster(specBuilders); + PulsarClusterSpec[] specs = new PulsarClusterSpec[2]; + for (int i = 0; i < specBuilders.length; i++) { + specs[i] = specBuilders[i].build(); + } + setupCluster0(specs); + } + + protected PulsarClusterSpec.PulsarClusterSpecBuilder[] beforeSetupCluster ( + PulsarClusterSpec.PulsarClusterSpecBuilder... specBuilder) { + return specBuilder; + } + + protected void setupCluster0(PulsarClusterSpec... 
specs) throws Exception { + incrementSetupNumber(); + log.info("Setting up geo cluster with {} local clusters}", specs.length); + + this.geoCluster = PulsarGeoCluster.forSpec(specs); + + beforeStartCluster(); + + this.geoCluster.start(); + + log.info("Geo Cluster is setup!"); + } + + protected void beforeStartCluster() throws Exception { + // no-op + } + + public void tearDownCluster() throws Exception { + markCurrentSetupNumberCleaned(); + if (null != this.geoCluster) { + this.geoCluster.stop(); + } + } +} diff --git a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarTestBase.java b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarTestBase.java index 9989b15faa95e..778c67b27b538 100644 --- a/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarTestBase.java +++ b/tests/integration/src/test/java/org/apache/pulsar/tests/integration/topologies/PulsarTestBase.java @@ -34,9 +34,18 @@ import org.apache.pulsar.common.util.FutureUtil; import org.apache.pulsar.tests.TestRetrySupport; import org.testng.Assert; +import org.testng.annotations.DataProvider; public abstract class PulsarTestBase extends TestRetrySupport { + @DataProvider(name = "TopicDomain") + public Object[][] topicDomain() { + return new Object[][] { + {"persistent"}, + {"non-persistent"} + }; + } + public static String randomName(int numChars) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < numChars; i++) { @@ -67,7 +76,7 @@ protected static String generateTopicName(String namespace, String topicPrefix, } } - public void testPublishAndConsume(String serviceUrl, boolean isPersistent) throws Exception { + protected void testPublishAndConsume(String serviceUrl, boolean isPersistent) throws Exception { String topicName = generateTopicName("testpubconsume", isPersistent); int numMessages = 10; @@ -98,7 +107,7 @@ public void testPublishAndConsume(String serviceUrl, boolean isPersistent) throw } } - 
public void testBatchMessagePublishAndConsume(String serviceUrl, boolean isPersistent) throws Exception { + protected void testBatchMessagePublishAndConsume(String serviceUrl, boolean isPersistent) throws Exception { String topicName = generateTopicName("test-batch-publish-consume", isPersistent); final int numMessages = 10000; @@ -133,7 +142,7 @@ public void testBatchMessagePublishAndConsume(String serviceUrl, boolean isPersi } } - public void testBatchIndexAckDisabled(String serviceUrl) throws Exception { + protected void testBatchIndexAckDisabled(String serviceUrl) throws Exception { String topicName = generateTopicName("test-batch-index-ack-disabled", true); final int numMessages = 100; try (PulsarClient client = PulsarClient.builder() diff --git a/tests/integration/src/test/resources/pulsar-io-sources.xml b/tests/integration/src/test/resources/pulsar-io-sources.xml index a5afc5d8d0322..636b3e479195f 100644 --- a/tests/integration/src/test/resources/pulsar-io-sources.xml +++ b/tests/integration/src/test/resources/pulsar-io-sources.xml @@ -23,6 +23,8 @@ + + \ No newline at end of file diff --git a/tests/integration/src/test/resources/pulsar-messaging.xml b/tests/integration/src/test/resources/pulsar-messaging.xml index aa31852fd6f62..cfbdb22587034 100644 --- a/tests/integration/src/test/resources/pulsar-messaging.xml +++ b/tests/integration/src/test/resources/pulsar-messaging.xml @@ -24,9 +24,11 @@ + + + - \ No newline at end of file diff --git a/tests/integration/src/test/resources/pulsar-python.xml b/tests/integration/src/test/resources/pulsar-python.xml new file mode 100644 index 0000000000000..a5faa6389e0f1 --- /dev/null +++ b/tests/integration/src/test/resources/pulsar-python.xml @@ -0,0 +1,28 @@ + + + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-schema.xml b/tests/integration/src/test/resources/pulsar-schema.xml index c24b4fae0ccb1..e07fdf2b2d86f 100644 --- a/tests/integration/src/test/resources/pulsar-schema.xml +++ 
b/tests/integration/src/test/resources/pulsar-schema.xml @@ -24,6 +24,7 @@ + diff --git a/tests/integration/src/test/resources/pulsar-semantics.xml b/tests/integration/src/test/resources/pulsar-semantics.xml new file mode 100644 index 0000000000000..5b5402af4623b --- /dev/null +++ b/tests/integration/src/test/resources/pulsar-semantics.xml @@ -0,0 +1,28 @@ + + + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar-upgrade.xml b/tests/integration/src/test/resources/pulsar-upgrade.xml new file mode 100644 index 0000000000000..a52db54753372 --- /dev/null +++ b/tests/integration/src/test/resources/pulsar-upgrade.xml @@ -0,0 +1,28 @@ + + + + + + + + + diff --git a/tests/integration/src/test/resources/pulsar.xml b/tests/integration/src/test/resources/pulsar.xml index 7993d0c6cd238..5382d9b5f5bef 100644 --- a/tests/integration/src/test/resources/pulsar.xml +++ b/tests/integration/src/test/resources/pulsar.xml @@ -36,5 +36,8 @@ + + + diff --git a/tests/pom.xml b/tests/pom.xml index df6f2c1b411c2..a45e2c80ceed9 100644 --- a/tests/pom.xml +++ b/tests/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 org.apache.pulsar.tests tests-parent diff --git a/tests/pulsar-client-admin-shade-test/pom.xml b/tests/pulsar-client-admin-shade-test/pom.xml index 75b31765f40a4..bc1e44eb72fab 100644 --- a/tests/pulsar-client-admin-shade-test/pom.xml +++ b/tests/pulsar-client-admin-shade-test/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar.tests tests-parent - 2.9.0-SNAPSHOT + 2.9.3 pulsar-client-admin-shade-test diff --git a/tests/pulsar-client-all-shade-test/pom.xml b/tests/pulsar-client-all-shade-test/pom.xml index c5b84a933e83a..1bdf4cee29e5e 100644 --- a/tests/pulsar-client-all-shade-test/pom.xml +++ b/tests/pulsar-client-all-shade-test/pom.xml @@ -26,7 +26,7 @@ org.apache.pulsar.tests tests-parent - 2.9.0-SNAPSHOT + 2.9.3 pulsar-client-all-shade-test diff --git a/tests/pulsar-client-shade-test/pom.xml b/tests/pulsar-client-shade-test/pom.xml index 
438fd758474ee..85fccd3178f5d 100644 --- a/tests/pulsar-client-shade-test/pom.xml +++ b/tests/pulsar-client-shade-test/pom.xml @@ -27,7 +27,7 @@ org.apache.pulsar.tests tests-parent - 2.9.0-SNAPSHOT + 2.9.3 pulsar-client-shade-test diff --git a/tiered-storage/file-system/pom.xml b/tiered-storage/file-system/pom.xml index c261a9f0b7b01..25c6d1d0f4f1d 100644 --- a/tiered-storage/file-system/pom.xml +++ b/tiered-storage/file-system/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar tiered-storage-parent - 2.9.0-SNAPSHOT + 2.9.3 .. @@ -46,6 +46,16 @@ org.apache.hadoop hadoop-common ${hdfs-offload-version3} + + + log4j + log4j + + + org.slf4j + slf4j-log4j12 + + com.google.protobuf diff --git a/tiered-storage/jcloud/pom.xml b/tiered-storage/jcloud/pom.xml index 265743c7e376e..9299ddda4f6ac 100644 --- a/tiered-storage/jcloud/pom.xml +++ b/tiered-storage/jcloud/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar tiered-storage-parent - 2.9.0-SNAPSHOT + 2.9.3 .. diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedInputStreamImpl.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedInputStreamImpl.java index 6a204d56de951..e3fc68ab7e218 100644 --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedInputStreamImpl.java +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedInputStreamImpl.java @@ -141,4 +141,9 @@ public void seekForward(long position) throws IOException { public void close() { buffer.release(); } + + @Override + public int available() throws IOException { + return (int)(objectLen - cursor) + buffer.readableBytes(); + } } diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImpl.java 
b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImpl.java index 2bf380d8c4126..6675e084e97bd 100644 --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImpl.java +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImpl.java @@ -36,6 +36,7 @@ import org.apache.bookkeeper.client.api.ReadHandle; import org.apache.bookkeeper.client.impl.LedgerEntriesImpl; import org.apache.bookkeeper.client.impl.LedgerEntryImpl; +import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.offload.jcloud.BackedInputStream; import org.apache.bookkeeper.mledger.offload.jcloud.OffloadIndexBlock; import org.apache.bookkeeper.mledger.offload.jcloud.OffloadIndexBlockBuilder; @@ -103,8 +104,16 @@ public CompletableFuture closeAsync() { public CompletableFuture readAsync(long firstEntry, long lastEntry) { log.debug("Ledger {}: reading {} - {}", getId(), firstEntry, lastEntry); CompletableFuture promise = new CompletableFuture<>(); - executor.submit(() -> { + executor.execute(() -> { + if (state == State.Closed) { + log.warn("Reading a closed read handler. 
Ledger ID: {}, Read range: {}-{}", + ledgerId, firstEntry, lastEntry); + promise.completeExceptionally(new ManagedLedgerException.OffloadReadHandleClosedException()); + return; + } + List entries = new ArrayList(); + boolean seeked = false; try { if (firstEntry > lastEntry || firstEntry < 0 @@ -115,20 +124,15 @@ public CompletableFuture readAsync(long firstEntry, long lastEntr long entriesToRead = (lastEntry - firstEntry) + 1; long nextExpectedId = firstEntry; - // seek the position to the first entry position, otherwise we will get the unexpected entry ID when doing - // the first read, that would cause read an unexpected entry id which is out of range between firstEntry - // and lastEntry - // for example, when we get 1-10 entries at first, then the next request is get 2-9, the following code - // will read the entry id from the stream and that is not the correct entry id, so it will seek to the - // correct position then read the stream as normal. But the entry id may exceed the last entry id, that - // will cause we are hardly to know the edge of the request range. - inputStream.seek(index.getIndexEntryForEntry(firstEntry).getDataOffset()); + // checking the data stream has enough data to read to avoid throw EOF exception when reading data. + // 12 bytes represent the stream have the length and entryID to read. + if (dataStream.available() < 12) { + log.warn("There hasn't enough data to read, current available data has {} bytes," + + " seek to the first entry {} to avoid EOF exception", inputStream.available(), firstEntry); + inputStream.seek(index.getIndexEntryForEntry(firstEntry).getDataOffset()); + } while (entriesToRead > 0) { - if (state == State.Closed) { - log.warn("Reading a closed read handler. 
Ledger ID: {}, Read range: {}-{}", ledgerId, firstEntry, lastEntry); - throw new BKException.BKUnexpectedConditionException(); - } int length = dataStream.readInt(); if (length < 0) { // hit padding or new block inputStream.seek(index.getIndexEntryForEntry(nextExpectedId).getDataOffset()); @@ -149,14 +153,20 @@ public CompletableFuture readAsync(long firstEntry, long lastEntr log.warn("The read entry {} is not the expected entry {} but in the range of {} - {}," + " seeking to the right position", entryId, nextExpectedId, nextExpectedId, lastEntry); inputStream.seek(index.getIndexEntryForEntry(nextExpectedId).getDataOffset()); - continue; } else if (entryId < nextExpectedId && !index.getIndexEntryForEntry(nextExpectedId).equals(index.getIndexEntryForEntry(entryId))) { log.warn("Read an unexpected entry id {} which is smaller than the next expected entry id {}" - + ", seeking to the right position", entries, nextExpectedId); + + ", seeking to the right position", entryId, nextExpectedId); inputStream.seek(index.getIndexEntryForEntry(nextExpectedId).getDataOffset()); - continue; } else if (entryId > lastEntry) { + // in the normal case, the entry id should increment in order. But if there has random access in + // the read method, we should allow to seek to the right position and the entry id should + // never over to the last entry again. 
+ if (!seeked) { + inputStream.seek(index.getIndexEntryForEntry(nextExpectedId).getDataOffset()); + seeked = true; + continue; + } log.info("Expected to read {}, but read {}, which is greater than last entry {}", nextExpectedId, entryId, lastEntry); throw new BKException.BKUnexpectedConditionException(); @@ -167,6 +177,8 @@ public CompletableFuture readAsync(long firstEntry, long lastEntr promise.complete(LedgerEntriesImpl.create(entries)); } catch (Throwable t) { + log.error("Failed to read entries {} - {} from the offloader in ledger {}", + firstEntry, lastEntry, ledgerId, t); promise.completeExceptionally(t); entries.forEach(LedgerEntry::close); } @@ -218,12 +230,32 @@ public static ReadHandle open(ScheduledExecutorService executor, VersionCheck versionCheck, long ledgerId, int readBufferSize) throws IOException { - Blob blob = blobStore.getBlob(bucket, indexKey); - versionCheck.check(indexKey, blob); - OffloadIndexBlockBuilder indexBuilder = OffloadIndexBlockBuilder.create(); - OffloadIndexBlock index; - try (InputStream payLoadStream = blob.getPayload().openStream()) { - index = (OffloadIndexBlock) indexBuilder.fromStream(payLoadStream); + int retryCount = 3; + OffloadIndexBlock index = null; + IOException lastException = null; + // The following retry is used to avoid to some network issue cause read index file failure. + // If it can not recovery in the retry, we will throw the exception and the dispatcher will schedule to + // next read. + // If we use a backoff to control the retry, it will introduce a concurrent operation. + // We don't want to make it complicated, because in the most of case it shouldn't in the retry loop. 
+ while (retryCount-- > 0) { + Blob blob = blobStore.getBlob(bucket, indexKey); + versionCheck.check(indexKey, blob); + OffloadIndexBlockBuilder indexBuilder = OffloadIndexBlockBuilder.create(); + try (InputStream payLoadStream = blob.getPayload().openStream()) { + index = (OffloadIndexBlock) indexBuilder.fromStream(payLoadStream); + } catch (IOException e) { + // retry to avoid the network issue caused read failure + log.warn("Failed to get index block from the offoaded index file {}, still have {} times to retry", + indexKey, retryCount, e); + lastException = e; + continue; + } + lastException = null; + break; + } + if (lastException != null) { + throw lastException; } BackedInputStream inputStream = new BlobStoreBackedInputStreamImpl(blobStore, bucket, key, diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImplV2.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImplV2.java index 0dba4b0fc6aa7..ac6f121e818ab 100644 --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImplV2.java +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreBackedReadHandleImplV2.java @@ -38,6 +38,7 @@ import org.apache.bookkeeper.client.api.ReadHandle; import org.apache.bookkeeper.client.impl.LedgerEntriesImpl; import org.apache.bookkeeper.client.impl.LedgerEntryImpl; +import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.offload.jcloud.BackedInputStream; import org.apache.bookkeeper.mledger.offload.jcloud.OffloadIndexBlockV2; import org.apache.bookkeeper.mledger.offload.jcloud.OffloadIndexBlockV2Builder; @@ -56,6 +57,12 @@ public class BlobStoreBackedReadHandleImplV2 implements ReadHandle { private final List inputStreams; private final List dataStreams; private final ExecutorService executor; 
+ private State state = null; + + enum State { + Opened, + Closed + } static class GroupedReader { @Override @@ -97,6 +104,7 @@ private BlobStoreBackedReadHandleImplV2(long ledgerId, List dataStreams.add(new DataInputStream(inputStream)); } this.executor = executor; + this.state = State.Opened; } @Override @@ -121,6 +129,7 @@ public CompletableFuture closeAsync() { for (DataInputStream dataStream : dataStreams) { dataStream.close(); } + state = State.Closed; promise.complete(null); } catch (IOException t) { promise.completeExceptionally(t); @@ -133,13 +142,20 @@ public CompletableFuture closeAsync() { public CompletableFuture readAsync(long firstEntry, long lastEntry) { log.debug("Ledger {}: reading {} - {}", getId(), firstEntry, lastEntry); CompletableFuture promise = new CompletableFuture<>(); - if (firstEntry > lastEntry - || firstEntry < 0 - || lastEntry > getLastAddConfirmed()) { - promise.completeExceptionally(new IllegalArgumentException()); - return promise; - } - executor.submit(() -> { + executor.execute(() -> { + if (state == State.Closed) { + log.warn("Reading a closed read handler. 
Ledger ID: {}, Read range: {}-{}", + ledgerId, firstEntry, lastEntry); + promise.completeExceptionally(new ManagedLedgerException.OffloadReadHandleClosedException()); + return; + } + + if (firstEntry > lastEntry + || firstEntry < 0 + || lastEntry > getLastAddConfirmed()) { + promise.completeExceptionally(new BKException.BKIncorrectParameterException()); + return; + } List entries = new ArrayList(); List groupedReaders = null; try { diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreManagedLedgerOffloader.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreManagedLedgerOffloader.java index f9c86fd2967cd..5fc2bf7d3b79c 100644 --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreManagedLedgerOffloader.java +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreManagedLedgerOffloader.java @@ -23,6 +23,7 @@ import com.google.common.collect.Lists; import java.io.IOException; import java.time.Duration; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -173,6 +174,7 @@ public CompletableFuture offload(ReadHandle readHandle, .withDataBlockHeaderLength(BlockAwareSegmentInputStreamImpl.getHeaderSize()); String dataBlockKey = DataBlockUtils.dataBlockOffloadKey(readHandle.getId(), uuid); String indexBlockKey = DataBlockUtils.indexBlockOffloadKey(readHandle.getId(), uuid); + log.info("ledger {} dataBlockKey {} indexBlockKey {}", readHandle.getId(), dataBlockKey, indexBlockKey); MultipartUpload mpu = null; List parts = Lists.newArrayList(); @@ -180,7 +182,12 @@ public CompletableFuture offload(ReadHandle readHandle, // init multi part upload for data block. 
try { BlobBuilder blobBuilder = writeBlobStore.blobBuilder(dataBlockKey); - DataBlockUtils.addVersionInfo(blobBuilder, userMetadata); + Map objectMetadata = new HashMap<>(userMetadata); + objectMetadata.put("role", "data"); + if (extraMetadata != null) { + objectMetadata.putAll(extraMetadata); + } + DataBlockUtils.addVersionInfo(blobBuilder, objectMetadata); Blob blob = blobBuilder.build(); mpu = writeBlobStore.initiateMultipartUpload(config.getBucket(), blob.getMetadata(), new PutOptions()); } catch (Throwable t) { @@ -243,7 +250,12 @@ public CompletableFuture offload(ReadHandle readHandle, IndexInputStream indexStream = index.toStream()) { // write the index block BlobBuilder blobBuilder = writeBlobStore.blobBuilder(indexBlockKey); - DataBlockUtils.addVersionInfo(blobBuilder, userMetadata); + Map objectMetadata = new HashMap<>(userMetadata); + objectMetadata.put("role", "index"); + if (extraMetadata != null) { + objectMetadata.putAll(extraMetadata); + } + DataBlockUtils.addVersionInfo(blobBuilder, objectMetadata); Payload indexPayload = Payloads.newInputStreamPayload(indexStream); indexPayload.getContentMetadata().setContentLength((long) indexStream.getStreamSize()); indexPayload.getContentMetadata().setContentType("application/octet-stream"); diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlockAwareSegmentInputStreamImpl.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlockAwareSegmentInputStreamImpl.java index a4ffdea65098f..e5dbcb6434709 100644 --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlockAwareSegmentInputStreamImpl.java +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlockAwareSegmentInputStreamImpl.java @@ -20,6 +20,7 @@ import static com.google.common.base.Preconditions.checkState; import com.google.common.collect.Lists; +import com.google.common.primitives.Ints; 
import io.netty.buffer.ByteBuf; import io.netty.buffer.CompositeByteBuf; import java.io.IOException; @@ -27,6 +28,7 @@ import java.util.Iterator; import java.util.List; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.bookkeeper.client.api.LedgerEntries; import org.apache.bookkeeper.client.api.LedgerEntry; import org.apache.bookkeeper.client.api.ReadHandle; @@ -44,6 +46,9 @@ public class BlockAwareSegmentInputStreamImpl extends BlockAwareSegmentInputStre private static final Logger log = LoggerFactory.getLogger(BlockAwareSegmentInputStreamImpl.class); static final int[] BLOCK_END_PADDING = new int[]{ 0xFE, 0xDC, 0xDE, 0xAD }; + static final byte[] BLOCK_END_PADDING_BYTES = Ints.toByteArray(0xFEDCDEAD); + + private final ByteBuf paddingBuf = PulsarByteBufAllocator.DEFAULT.buffer(128, 128); private final ReadHandle ledger; private final long startEntryId; @@ -65,6 +70,9 @@ public class BlockAwareSegmentInputStreamImpl extends BlockAwareSegmentInputStre static final int ENTRY_HEADER_SIZE = 4 /* entry size */ + 8 /* entry id */; // Keep a list of all entries ByteBuf, each ByteBuf contains 2 buf: entry header and entry content. 
private List entriesByteBuf = null; + private int currentOffset = 0; + private final AtomicBoolean close = new AtomicBoolean(false); + public BlockAwareSegmentInputStreamImpl(ReadHandle ledger, long startEntryId, int blockSize) { this.ledger = ledger; @@ -76,6 +84,52 @@ public BlockAwareSegmentInputStreamImpl(ReadHandle ledger, long startEntryId, in this.entriesByteBuf = Lists.newLinkedList(); } + private ByteBuf readEntries(int len) throws IOException { + checkState(bytesReadOffset >= DataBlockHeaderImpl.getDataStartOffset()); + checkState(bytesReadOffset < blockSize); + + // once reach the end of entry buffer, read more, if there is more + if (bytesReadOffset < dataBlockFullOffset + && entriesByteBuf.isEmpty() + && startEntryId + blockEntryCount <= ledger.getLastAddConfirmed()) { + entriesByteBuf = readNextEntriesFromLedger(startEntryId + blockEntryCount, ENTRIES_PER_READ); + } + + if (!entriesByteBuf.isEmpty() + && bytesReadOffset + entriesByteBuf.get(0).readableBytes() <= blockSize) { + // always read from the first ByteBuf in the list, once read all of its content remove it. 
+ ByteBuf entryByteBuf = entriesByteBuf.get(0); + int readableBytes = entryByteBuf.readableBytes(); + int read = Math.min(readableBytes, len); + ByteBuf buf = entryByteBuf.slice(currentOffset, read); + buf.retain(); + currentOffset += read; + entryByteBuf.readerIndex(currentOffset); + bytesReadOffset += read; + + if (entryByteBuf.readableBytes() == 0) { + entryByteBuf.release(); + entriesByteBuf.remove(0); + blockEntryCount++; + currentOffset = 0; + } + + return buf; + } else { + // no space for a new entry or there are no more entries + // set data block full, return end padding + if (dataBlockFullOffset == blockSize) { + dataBlockFullOffset = bytesReadOffset; + } + paddingBuf.clear(); + for (int i = 0; i < Math.min(len, paddingBuf.capacity()); i++) { + paddingBuf.writeByte(BLOCK_END_PADDING_BYTES[(bytesReadOffset++ - dataBlockFullOffset) + % BLOCK_END_PADDING_BYTES.length]); + } + return paddingBuf.retain(); + } + } + // read ledger entries. private int readEntries() throws IOException { checkState(bytesReadOffset >= DataBlockHeaderImpl.getDataStartOffset()); @@ -143,6 +197,46 @@ private List readNextEntriesFromLedger(long start, long maxNumberEntrie } } + @Override + public int read(byte[] b, int off, int len) throws IOException { + if (b == null) { + throw new NullPointerException("The given bytes are null"); + } else if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException("off=" + off + ", len=" + len + ", b.length=" + b.length); + } else if (len == 0) { + return 0; + } + + int offset = off; + int readLen = len; + int readBytes = 0; + // reading header + if (dataBlockHeaderStream.available() > 0) { + int read = dataBlockHeaderStream.read(b, off, len); + offset += read; + readLen -= read; + readBytes += read; + bytesReadOffset += read; + } + if (readLen == 0) { + return readBytes; + } + + // reading ledger entries + if (bytesReadOffset < blockSize) { + readLen = Math.min(readLen, blockSize - bytesReadOffset); + ByteBuf 
readEntries = readEntries(readLen); + int read = readEntries.readableBytes(); + readEntries.readBytes(b, offset, read); + readEntries.release(); + readBytes += read; + return readBytes; + } + + // reached end + return -1; + } + @Override public int read() throws IOException { // reading header @@ -162,11 +256,20 @@ public int read() throws IOException { @Override public void close() throws IOException { - super.close(); - dataBlockHeaderStream.close(); - if (!entriesByteBuf.isEmpty()) { - entriesByteBuf.forEach(buf -> buf.release()); - entriesByteBuf.clear(); + // The close method will be triggered twice in the BlobStoreManagedLedgerOffloader#offload method. + // The stream resource used by the try-with block which will called the close + // And through debug, writeBlobStore.uploadMultipartPart in the offload method also will trigger + // the close method. + // So we add the close variable to avoid release paddingBuf twice. + if (close.compareAndSet(false, true)) { + super.close(); + dataBlockHeaderStream.close(); + if (!entriesByteBuf.isEmpty()) { + entriesByteBuf.forEach(buf -> buf.release()); + entriesByteBuf.clear(); + } + paddingBuf.clear(); + paddingBuf.release(); } } @@ -185,6 +288,10 @@ public int getBlockSize() { return blockSize; } + public int getDataBlockFullOffset() { + return dataBlockFullOffset; + } + @Override public int getBlockEntryCount() { return blockEntryCount; diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/OffloadIndexBlockImpl.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/OffloadIndexBlockImpl.java index 2f64089c81cd9..a3fa14e763a06 100644 --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/OffloadIndexBlockImpl.java +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/OffloadIndexBlockImpl.java @@ -338,11 +338,7 @@ private OffloadIndexBlock 
fromStream(DataInputStream dis) throws IOException { int segmentMetadataLength = dis.readInt(); byte[] metadataBytes = new byte[segmentMetadataLength]; - - if (segmentMetadataLength != dis.read(metadataBytes)) { - log.error("Read ledgerMetadata from bytes failed"); - throw new IOException("Read ledgerMetadata from bytes failed"); - } + dis.readFully(metadataBytes); this.segmentMetadata = parseLedgerMetadata(metadataBytes); for (int i = 0; i < indexEntryCount; i++) { diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/JCloudBlobStoreProvider.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/JCloudBlobStoreProvider.java index 49dabb261212c..f29d9ad06782d 100644 --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/JCloudBlobStoreProvider.java +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/JCloudBlobStoreProvider.java @@ -38,6 +38,7 @@ import java.io.IOException; import java.io.Serializable; import java.nio.charset.Charset; +import java.util.Arrays; import java.util.Properties; import java.util.UUID; @@ -61,6 +62,7 @@ import org.jclouds.domain.LocationScope; import org.jclouds.googlecloud.GoogleCredentialsFromJson; import org.jclouds.googlecloudstorage.GoogleCloudStorageProviderMetadata; +import org.jclouds.logging.slf4j.config.SLF4JLoggingModule; import org.jclouds.providers.AnonymousProviderMetadata; import org.jclouds.providers.ProviderMetadata; import org.jclouds.s3.S3ApiMetadata; @@ -140,6 +142,7 @@ public void validate(TieredStorageConfiguration config) throws IllegalArgumentEx @Override public BlobStore getBlobStore(TieredStorageConfiguration config) { ContextBuilder contextBuilder = ContextBuilder.newBuilder(config.getProviderMetadata()); + contextBuilder.modules(Arrays.asList(new SLF4JLoggingModule())); contextBuilder.overrides(config.getOverrides()); if 
(config.getProviderCredentials() != null) { @@ -178,17 +181,34 @@ public void buildCredentials(TieredStorageConfiguration config) { ALIYUN_OSS("aliyun-oss", new AnonymousProviderMetadata(new S3ApiMetadata(), "")) { @Override public void validate(TieredStorageConfiguration config) throws IllegalArgumentException { - ALIYUN_OSS_VALIDATION.validate(config); + S3_VALIDATION.validate(config); } @Override public BlobStore getBlobStore(TieredStorageConfiguration config) { - return ALIYUN_OSS_BLOB_STORE_BUILDER.getBlobStore(config); + return S3_BLOB_STORE_BUILDER.getBlobStore(config); } @Override public void buildCredentials(TieredStorageConfiguration config) { - ALIYUN_OSS_CREDENTIAL_BUILDER.buildCredentials(config); + S3_CREDENTIAL_BUILDER.buildCredentials(config); + } + }, + + S3("S3", new AnonymousProviderMetadata(new S3ApiMetadata(), "")) { + @Override + public BlobStore getBlobStore(TieredStorageConfiguration config) { + return S3_BLOB_STORE_BUILDER.getBlobStore(config); + } + + @Override + public void buildCredentials(TieredStorageConfiguration config) { + S3_CREDENTIAL_BUILDER.buildCredentials(config); + } + + @Override + public void validate(TieredStorageConfiguration config) throws IllegalArgumentException { + S3_VALIDATION.validate(config); } }, @@ -205,6 +225,7 @@ public void validate(TieredStorageConfiguration config) throws IllegalArgumentEx public BlobStore getBlobStore(TieredStorageConfiguration config) { ContextBuilder builder = ContextBuilder.newBuilder("transient"); + builder.modules(Arrays.asList(new SLF4JLoggingModule())); BlobStoreContext ctx = builder .buildView(BlobStoreContext.class); @@ -287,6 +308,7 @@ public ProviderMetadata getProviderMetadata() { static final BlobStoreBuilder BLOB_STORE_BUILDER = (TieredStorageConfiguration config) -> { ContextBuilder contextBuilder = ContextBuilder.newBuilder(config.getProviderMetadata()); + contextBuilder.modules(Arrays.asList(new SLF4JLoggingModule())); contextBuilder.overrides(config.getOverrides()); if 
(StringUtils.isNotEmpty(config.getServiceEndpoint())) { @@ -369,11 +391,14 @@ public String getAWSSecretKey() { } }; - static final BlobStoreBuilder ALIYUN_OSS_BLOB_STORE_BUILDER = (TieredStorageConfiguration config) -> { + static final BlobStoreBuilder S3_BLOB_STORE_BUILDER = (TieredStorageConfiguration config) -> { ContextBuilder contextBuilder = ContextBuilder.newBuilder(config.getProviderMetadata()); + contextBuilder.modules(Arrays.asList(new SLF4JLoggingModule())); Properties overrides = config.getOverrides(); - // For security reasons, OSS supports only virtual hosted style access. - overrides.setProperty(S3Constants.PROPERTY_S3_VIRTUAL_HOST_BUCKETS, "true"); + if (ALIYUN_OSS.getDriver().equals(config.getDriver())) { + // For security reasons, OSS supports only virtual hosted style access. + overrides.setProperty(S3Constants.PROPERTY_S3_VIRTUAL_HOST_BUCKETS, "true"); + } contextBuilder.overrides(overrides); contextBuilder.endpoint(config.getServiceEndpoint()); @@ -390,7 +415,7 @@ public String getAWSSecretKey() { } }; - static final ConfigValidation ALIYUN_OSS_VALIDATION = (TieredStorageConfiguration config) -> { + static final ConfigValidation S3_VALIDATION = (TieredStorageConfiguration config) -> { if (Strings.isNullOrEmpty(config.getServiceEndpoint())) { throw new IllegalArgumentException( "ServiceEndpoint must specified for " + config.getDriver() + " offload"); @@ -408,14 +433,21 @@ public String getAWSSecretKey() { } }; - static final CredentialBuilder ALIYUN_OSS_CREDENTIAL_BUILDER = (TieredStorageConfiguration config) -> { - String accountName = System.getenv("ALIYUN_OSS_ACCESS_KEY_ID"); - if (StringUtils.isEmpty(accountName)) { - throw new IllegalArgumentException("Couldn't get the aliyun oss access key id."); + static final CredentialBuilder S3_CREDENTIAL_BUILDER = (TieredStorageConfiguration config) -> { + String accountName = System.getenv().getOrDefault("ACCESS_KEY_ID", ""); + // For forward compatibility + if 
(StringUtils.isEmpty(accountName.trim())) { + accountName = System.getenv().getOrDefault("ALIYUN_OSS_ACCESS_KEY_ID", ""); + } + if (StringUtils.isEmpty(accountName.trim())) { + throw new IllegalArgumentException("Couldn't get the access key id."); + } + String accountKey = System.getenv().getOrDefault("ACCESS_KEY_SECRET", ""); + if (StringUtils.isEmpty(accountKey.trim())) { + accountKey = System.getenv().getOrDefault("ALIYUN_OSS_ACCESS_KEY_SECRET", ""); } - String accountKey = System.getenv("ALIYUN_OSS_ACCESS_KEY_SECRET"); - if (StringUtils.isEmpty(accountKey)) { - throw new IllegalArgumentException("Couldn't get the aliyun oss access key secret."); + if (StringUtils.isEmpty(accountKey.trim())) { + throw new IllegalArgumentException("Couldn't get the access key secret."); } Credentials credentials = new Credentials( accountName, accountKey); diff --git a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/TieredStorageConfiguration.java b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/TieredStorageConfiguration.java index 442980ad336d2..18e3bbf0db8fe 100644 --- a/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/TieredStorageConfiguration.java +++ b/tiered-storage/jcloud/src/main/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/TieredStorageConfiguration.java @@ -244,7 +244,7 @@ public Integer getMaxBlockSizeInBytes() { return Integer.valueOf(configProperties.get(key)); } } - return new Integer(64 * MB); + return 64 * MB; } public Integer getMinBlockSizeInBytes() { @@ -262,7 +262,7 @@ public Integer getReadBufferSizeInBytes() { return Integer.valueOf(configProperties.get(key)); } } - return new Integer(MB); + return MB; } public Integer getWriteBufferSizeInBytes() { @@ -329,6 +329,19 @@ protected Properties getOverrides() { overrides.setProperty(S3Constants.PROPERTY_S3_VIRTUAL_HOST_BUCKETS, "false"); } + // load more jclouds properties 
into the overrides + System.getProperties().entrySet().stream() + .filter(p -> p.getKey().toString().startsWith("jclouds")) + .forEach(jcloudsProp -> { + overrides.setProperty(jcloudsProp.getKey().toString(), jcloudsProp.getValue().toString()); + }); + + System.getenv().entrySet().stream() + .filter(p -> p.getKey().toString().startsWith("jclouds")) + .forEach(jcloudsProp -> { + overrides.setProperty(jcloudsProp.getKey().toString(), jcloudsProp.getValue().toString()); + }); + log.info("getOverrides: {}", overrides.toString()); return overrides; } diff --git a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/BlobStoreBackedInputStreamTest.java b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/BlobStoreBackedInputStreamTest.java index ffe8fb20e3f21..36541b42c7527 100644 --- a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/BlobStoreBackedInputStreamTest.java +++ b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/BlobStoreBackedInputStreamTest.java @@ -260,4 +260,27 @@ public void testSeekForward() throws Exception { toTest.seekForward(after); assertStreamsMatch(toTest, toCompare); } + + @Test + public void testAvailable() throws IOException { + String objectKey = "testAvailable"; + int objectSize = 2048; + RandomInputStream toWrite = new RandomInputStream(0, objectSize); + Payload payload = Payloads.newInputStreamPayload(toWrite); + payload.getContentMetadata().setContentLength((long)objectSize); + Blob blob = blobStore.blobBuilder(objectKey) + .payload(payload) + .contentLength(objectSize) + .build(); + String ret = blobStore.putBlob(BUCKET, blob); + BackedInputStream bis = new BlobStoreBackedInputStreamImpl( + blobStore, BUCKET, objectKey, (k, md) -> {}, objectSize, 512); + Assert.assertEquals(bis.available(), objectSize); + bis.seek(500); + Assert.assertEquals(bis.available(), objectSize - 500); + bis.seek(1024); + 
Assert.assertEquals(bis.available(), 1024); + bis.seek(2048); + Assert.assertEquals(bis.available(), 0); + } } diff --git a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreManagedLedgerOffloaderTest.java b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreManagedLedgerOffloaderTest.java index 90d8b1198f489..b78666ad6454f 100644 --- a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreManagedLedgerOffloaderTest.java +++ b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlobStoreManagedLedgerOffloaderTest.java @@ -24,6 +24,7 @@ import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.mock; import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.fail; import java.io.IOException; import java.util.Collections; @@ -40,6 +41,7 @@ import org.apache.bookkeeper.client.api.LedgerEntry; import org.apache.bookkeeper.client.api.ReadHandle; import org.apache.bookkeeper.mledger.LedgerOffloader; +import org.apache.bookkeeper.mledger.ManagedLedgerException; import org.apache.bookkeeper.mledger.offload.jcloud.provider.JCloudBlobStoreProvider; import org.apache.bookkeeper.mledger.offload.jcloud.provider.TieredStorageConfiguration; import org.jclouds.blobstore.BlobStore; @@ -477,4 +479,45 @@ public void testReadUnknownIndexVersion() throws Exception { Assert.assertTrue(e.getCause().getMessage().contains("Invalid object version")); } } + + @Test + public void testReadEOFException() throws Throwable { + ReadHandle toWrite = buildReadHandle(DEFAULT_BLOCK_SIZE, 1); + LedgerOffloader offloader = getOffloader(); + UUID uuid = UUID.randomUUID(); + offloader.offload(toWrite, uuid, new HashMap<>()).get(); + + ReadHandle toTest = offloader.readOffloaded(toWrite.getId(), uuid, Collections.emptyMap()).get(); + Assert.assertEquals(toTest.getLastAddConfirmed(), 
toWrite.getLastAddConfirmed()); + toTest.readAsync(0, toTest.getLastAddConfirmed()).get(); + + try { + toTest.readAsync(0, 0).get(); + } catch (Exception e) { + fail("Get unexpected exception when reading entries", e); + } + } + + @Test + public void testReadWithAClosedLedgerHandler() throws Exception { + ReadHandle toWrite = buildReadHandle(DEFAULT_BLOCK_SIZE, 1); + LedgerOffloader offloader = getOffloader(); + UUID uuid = UUID.randomUUID(); + offloader.offload(toWrite, uuid, new HashMap<>()).get(); + + ReadHandle toTest = offloader.readOffloaded(toWrite.getId(), uuid, Collections.emptyMap()).get(); + Assert.assertEquals(toTest.getLastAddConfirmed(), toWrite.getLastAddConfirmed()); + long lac = toTest.getLastAddConfirmed(); + toTest.readAsync(0, lac).get(); + toTest.closeAsync().get(); + try { + toTest.readAsync(0, lac).get(); + } catch (Exception e) { + if (e.getCause() instanceof ManagedLedgerException.OffloadReadHandleClosedException) { + // expected exception + return; + } + throw e; + } + } } diff --git a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlockAwareSegmentInputStreamTest.java b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlockAwareSegmentInputStreamTest.java index 5cf6bd5650003..fff1ce8b7aa9a 100644 --- a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlockAwareSegmentInputStreamTest.java +++ b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BlockAwareSegmentInputStreamTest.java @@ -19,6 +19,7 @@ package org.apache.bookkeeper.mledger.offload.jcloud.impl; import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.fail; import static org.testng.internal.junit.ArrayAsserts.assertArrayEquals; @@ -28,6 +29,8 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import java.io.ByteArrayInputStream; +import 
java.io.IOException; +import java.lang.reflect.Field; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Iterator; @@ -44,6 +47,7 @@ import org.apache.bookkeeper.client.api.LedgerMetadata; import org.apache.bookkeeper.client.api.ReadHandle; import org.apache.bookkeeper.mledger.offload.jcloud.DataBlockHeader; +import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import org.testng.collections.Lists; @@ -206,8 +210,16 @@ public CompletableFuture closeAsync() { } } - @Test - public void testHaveEndPadding() throws Exception { + @DataProvider(name = "useBufferRead") + public static Object[][] useBufferRead() { + return new Object[][]{ + {Boolean.TRUE}, + {Boolean.FALSE} + }; + } + + @Test(dataProvider = "useBufferRead") + public void testHaveEndPadding(boolean useBufferRead) throws Exception { int ledgerId = 1; int entrySize = 8; int lac = 160; @@ -226,7 +238,12 @@ public void testHaveEndPadding() throws Exception { // verify read inputStream // 1. read header. 
128 byte headerB[] = new byte[DataBlockHeaderImpl.getDataStartOffset()]; - ByteStreams.readFully(inputStream, headerB); + if (useBufferRead) { + int ret = inputStream.read(headerB, 0, DataBlockHeaderImpl.getDataStartOffset()); + assertEquals(DataBlockHeaderImpl.getDataStartOffset(), ret); + } else { + ByteStreams.readFully(inputStream, headerB); + } DataBlockHeader headerRead = DataBlockHeaderImpl.fromStream(new ByteArrayInputStream(headerB)); assertEquals(headerRead.getBlockLength(), blockSize); assertEquals(headerRead.getFirstEntryId(), 0); @@ -240,9 +257,18 @@ public void testHaveEndPadding() throws Exception { byte lengthBuf[] = new byte[4]; byte entryIdBuf[] = new byte[8]; byte content[] = new byte[entrySize]; - inputStream.read(lengthBuf); - inputStream.read(entryIdBuf); - inputStream.read(content); + if (useBufferRead) { + int read = inputStream.read(lengthBuf, 0, 4); + assertEquals(read, 4); + read = inputStream.read(entryIdBuf, 0, 8); + assertEquals(read, 8); + read = inputStream.read(content, 0, entrySize); + assertEquals(read, entrySize); + } else { + inputStream.read(lengthBuf); + inputStream.read(entryIdBuf); + inputStream.read(content); + } assertEquals(entrySize, Ints.fromByteArray(lengthBuf)); assertEquals(i, Longs.fromByteArray(entryIdBuf)); @@ -256,13 +282,36 @@ public void testHaveEndPadding() throws Exception { int left = blockSize - DataBlockHeaderImpl.getDataStartOffset() - expectedEntryCount * (entrySize + 4 + 8); assertEquals(left, 5); byte padding[] = new byte[left]; - inputStream.read(padding); + if (useBufferRead) { + int ret = 0; + int offset = 0; + while ((ret = inputStream.read(padding, offset, padding.length - offset)) > 0) { + offset += ret; + } + assertEquals(inputStream.read(padding, 0, padding.length), -1); + } else { + int len = left; + int offset = 0; + byte[] buf = new byte[4]; + while (len > 0) { + int ret = inputStream.read(buf); + for (int i = 0; i < ret; i++) { + padding[offset++] = buf[i]; + } + len -= ret; + } + } ByteBuf 
paddingBuf = Unpooled.wrappedBuffer(padding); IntStream.range(0, paddingBuf.capacity()/4).forEach(i -> assertEquals(Integer.toHexString(paddingBuf.readInt()), Integer.toHexString(0xFEDCDEAD))); // 4. reach end. + if (useBufferRead) { + byte[] b = new byte[4]; + int ret = inputStream.read(b, 0, 4); + assertEquals(ret, -1); + } assertEquals(inputStream.read(), -1); assertEquals(inputStream.getBlockEntryCount(), expectedEntryCount); @@ -272,8 +321,8 @@ public void testHaveEndPadding() throws Exception { inputStream.close(); } - @Test - public void testNoEndPadding() throws Exception { + @Test(dataProvider = "useBufferRead") + public void testNoEndPadding(boolean useBufferRead) throws Exception { int ledgerId = 1; int entrySize = 8; int lac = 120; @@ -293,7 +342,12 @@ public void testNoEndPadding() throws Exception { // verify read inputStream // 1. read header. 128 byte headerB[] = new byte[DataBlockHeaderImpl.getDataStartOffset()]; - ByteStreams.readFully(inputStream, headerB); + if (useBufferRead) { + int ret = inputStream.read(headerB, 0, DataBlockHeaderImpl.getDataStartOffset()); + assertEquals(DataBlockHeaderImpl.getDataStartOffset(), ret); + } else { + ByteStreams.readFully(inputStream, headerB); + } DataBlockHeader headerRead = DataBlockHeaderImpl.fromStream(new ByteArrayInputStream(headerB)); assertEquals(headerRead.getBlockLength(), blockSize); assertEquals(headerRead.getFirstEntryId(), 0); @@ -307,9 +361,18 @@ public void testNoEndPadding() throws Exception { byte lengthBuf[] = new byte[4]; byte entryIdBuf[] = new byte[8]; byte content[] = new byte[entrySize]; - inputStream.read(lengthBuf); - inputStream.read(entryIdBuf); - inputStream.read(content); + if (useBufferRead) { + int read = inputStream.read(lengthBuf, 0, 4); + assertEquals(read, 4); + read = inputStream.read(entryIdBuf, 0, 8); + assertEquals(read, 8); + read = inputStream.read(content, 0, entrySize); + assertEquals(read, entrySize); + } else { + inputStream.read(lengthBuf); + 
inputStream.read(entryIdBuf); + inputStream.read(content); + } assertEquals(entrySize, Ints.fromByteArray(lengthBuf)); assertEquals(i, Longs.fromByteArray(entryIdBuf)); @@ -324,6 +387,11 @@ public void testNoEndPadding() throws Exception { assertEquals(left, 0); // 4. reach end. + if (useBufferRead) { + byte[] b = new byte[4]; + int ret = inputStream.read(b, 0, 4); + assertEquals(ret, -1); + } assertEquals(inputStream.read(), -1); assertEquals(inputStream.getBlockEntryCount(), expectedEntryCount); @@ -333,8 +401,8 @@ public void testNoEndPadding() throws Exception { inputStream.close(); } - @Test - public void testReadTillLac() throws Exception { + @Test(dataProvider = "useBufferRead") + public void testReadTillLac(boolean useBufferRead) throws Exception { // simulate last data block read. int ledgerId = 1; int entrySize = 8; @@ -354,7 +422,12 @@ public void testReadTillLac() throws Exception { // verify read inputStream // 1. read header. 128 byte headerB[] = new byte[DataBlockHeaderImpl.getDataStartOffset()]; - ByteStreams.readFully(inputStream, headerB); + if (useBufferRead) { + int ret = inputStream.read(headerB, 0, DataBlockHeaderImpl.getDataStartOffset()); + assertEquals(DataBlockHeaderImpl.getDataStartOffset(), ret); + } else { + ByteStreams.readFully(inputStream, headerB); + } DataBlockHeader headerRead = DataBlockHeaderImpl.fromStream(new ByteArrayInputStream(headerB)); assertEquals(headerRead.getBlockLength(), blockSize); assertEquals(headerRead.getFirstEntryId(), 0); @@ -368,9 +441,18 @@ public void testReadTillLac() throws Exception { byte lengthBuf[] = new byte[4]; byte entryIdBuf[] = new byte[8]; byte content[] = new byte[entrySize]; - inputStream.read(lengthBuf); - inputStream.read(entryIdBuf); - inputStream.read(content); + if (useBufferRead) { + int read = inputStream.read(lengthBuf, 0, 4); + assertEquals(read, 4); + read = inputStream.read(entryIdBuf, 0, 8); + assertEquals(read, 8); + read = inputStream.read(content, 0, entrySize); + 
assertEquals(read, entrySize); + } else { + inputStream.read(lengthBuf); + inputStream.read(entryIdBuf); + inputStream.read(content); + } assertEquals(entrySize, Ints.fromByteArray(lengthBuf)); assertEquals(i, Longs.fromByteArray(entryIdBuf)); @@ -385,6 +467,11 @@ public void testReadTillLac() throws Exception { assertEquals(left, 0); // 4. reach end. + if (useBufferRead) { + byte[] b = new byte[4]; + int ret = inputStream.read(b, 0, 4); + assertEquals(ret, -1); + } assertEquals(inputStream.read(), -1); assertEquals(inputStream.getBlockEntryCount(), expectedEntryCount); @@ -394,8 +481,8 @@ public void testReadTillLac() throws Exception { inputStream.close(); } - @Test - public void testNoEntryPutIn() throws Exception { + @Test(dataProvider = "useBufferRead") + public void testNoEntryPutIn(boolean useBufferRead) throws Exception { // simulate first entry size over the block size budget, it shouldn't be added. // 2 entries, each with bigger size than block size, so there should no entry added into block. int ledgerId = 1; @@ -416,7 +503,12 @@ public void testNoEntryPutIn() throws Exception { // verify read inputStream // 1. read header. 128 byte headerB[] = new byte[DataBlockHeaderImpl.getDataStartOffset()]; - ByteStreams.readFully(inputStream, headerB); + if (useBufferRead) { + int ret = inputStream.read(headerB, 0, DataBlockHeaderImpl.getDataStartOffset()); + assertEquals(DataBlockHeaderImpl.getDataStartOffset(), ret); + } else { + ByteStreams.readFully(inputStream, headerB); + } DataBlockHeader headerRead = DataBlockHeaderImpl.fromStream(new ByteArrayInputStream(headerB)); assertEquals(headerRead.getBlockLength(), blockSize); assertEquals(headerRead.getFirstEntryId(), 0); @@ -424,13 +516,36 @@ public void testNoEntryPutIn() throws Exception { // 2. since no entry put in, it should only get padding after header. 
byte padding[] = new byte[blockSize - DataBlockHeaderImpl.getDataStartOffset()]; - inputStream.read(padding); + if (useBufferRead) { + int ret = 0; + int offset = 0; + while ((ret = inputStream.read(padding, offset, padding.length - offset)) > 0) { + offset += ret; + } + assertEquals(inputStream.read(padding, 0, padding.length), -1); + } else { + int len = padding.length; + int offset = 0; + byte[] buf = new byte[4]; + while (len > 0) { + int ret = inputStream.read(buf); + for (int i = 0; i < ret; i++) { + padding[offset++] = buf[i]; + } + len -= ret; + } + } ByteBuf paddingBuf = Unpooled.wrappedBuffer(padding); IntStream.range(0, paddingBuf.capacity()/4).forEach(i -> assertEquals(Integer.toHexString(paddingBuf.readInt()), Integer.toHexString(0xFEDCDEAD))); // 3. reach end. + if (useBufferRead) { + byte[] b = new byte[4]; + int ret = inputStream.read(b, 0, 4); + assertEquals(ret, -1); + } assertEquals(inputStream.read(), -1); assertEquals(inputStream.getBlockEntryCount(), 0); @@ -440,8 +555,8 @@ public void testNoEntryPutIn() throws Exception { inputStream.close(); } - @Test - public void testPaddingOnLastBlock() throws Exception { + @Test(dataProvider = "useBufferRead") + public void testPaddingOnLastBlock(boolean useBufferRead) throws Exception { int ledgerId = 1; int entrySize = 1000; int lac = 0; @@ -460,7 +575,12 @@ public void testPaddingOnLastBlock() throws Exception { // verify read inputStream // 1. read header. 
128 byte headerB[] = new byte[DataBlockHeaderImpl.getDataStartOffset()]; - ByteStreams.readFully(inputStream, headerB); + if (useBufferRead) { + int ret = inputStream.read(headerB, 0, DataBlockHeaderImpl.getDataStartOffset()); + assertEquals(DataBlockHeaderImpl.getDataStartOffset(), ret); + } else { + ByteStreams.readFully(inputStream, headerB); + } DataBlockHeader headerRead = DataBlockHeaderImpl.fromStream(new ByteArrayInputStream(headerB)); assertEquals(headerRead.getBlockLength(), blockSize); assertEquals(headerRead.getFirstEntryId(), 0); @@ -474,9 +594,18 @@ public void testPaddingOnLastBlock() throws Exception { byte lengthBuf[] = new byte[4]; byte entryIdBuf[] = new byte[8]; byte content[] = new byte[entrySize]; - inputStream.read(lengthBuf); - inputStream.read(entryIdBuf); - inputStream.read(content); + if (useBufferRead) { + int read = inputStream.read(lengthBuf, 0, 4); + assertEquals(read, 4); + read = inputStream.read(entryIdBuf, 0, 8); + assertEquals(read, 8); + read = inputStream.read(content, 0, entrySize); + assertEquals(read, entrySize); + } else { + inputStream.read(lengthBuf); + inputStream.read(entryIdBuf); + inputStream.read(content); + } assertEquals(entrySize, Ints.fromByteArray(lengthBuf)); assertEquals(i, Longs.fromByteArray(entryIdBuf)); @@ -490,13 +619,36 @@ public void testPaddingOnLastBlock() throws Exception { int consumedBytes = DataBlockHeaderImpl.getDataStartOffset() + expectedEntryCount * (entrySize + BlockAwareSegmentInputStreamImpl.ENTRY_HEADER_SIZE); byte padding[] = new byte[blockSize - consumedBytes]; - inputStream.read(padding); + if (useBufferRead) { + int ret = 0; + int offset = 0; + while ((ret = inputStream.read(padding, offset, padding.length - offset)) > 0) { + offset += ret; + } + assertEquals(inputStream.read(padding, 0, padding.length), -1); + } else { + int len = blockSize - consumedBytes; + int offset = 0; + byte[] buf = new byte[4]; + while (len > 0) { + int ret = inputStream.read(buf); + for (int i = 0; i < ret; 
i++) { + padding[offset++] = buf[i]; + } + len -= ret; + } + } ByteBuf paddingBuf = Unpooled.wrappedBuffer(padding); IntStream.range(0, paddingBuf.capacity()/4).forEach(i -> assertEquals(Integer.toHexString(paddingBuf.readInt()), Integer.toHexString(0xFEDCDEAD))); // 3. reach end. + if (useBufferRead) { + byte[] b = new byte[4]; + int ret = inputStream.read(b, 0, 4); + assertEquals(ret, -1); + } assertEquals(inputStream.read(), -1); assertEquals(inputStream.getBlockEntryCount(), 1); @@ -530,4 +682,135 @@ public void testOnlyNegativeOnEOF() throws Exception { } } + @Test + public void testOnlyNegativeOnEOFWithBufferedRead() throws IOException { + int ledgerId = 1; + int entrySize = 10000; + int lac = 0; + + Random r = new Random(0); + ReadHandle readHandle = new MockReadHandle(ledgerId, entrySize, lac, () -> (byte)r.nextInt()); + + int blockSize = DataBlockHeaderImpl.getDataStartOffset() + entrySize * 2; + BlockAwareSegmentInputStreamImpl inputStream = new BlockAwareSegmentInputStreamImpl(readHandle, 0, blockSize); + + int bytesRead = 0; + int ret; + int offset = 0; + int resetOffsetCount = 0; + byte[] buf = new byte[1024]; + while ((ret = inputStream.read(buf, offset, buf.length - offset)) > 0) { + bytesRead += ret; + int currentOffset = offset; + offset = (offset + ret) % buf.length; + if (offset < currentOffset) { + resetOffsetCount++; + } + } + assertEquals(bytesRead, blockSize); + assertNotEquals(resetOffsetCount, 0); + } + + // This test is for testing the read(byte[] buf, int off, int len) method can work properly + // on the offset not 0. + @Test + public void testReadTillLacWithSmallBuffer() throws Exception { + // simulate last data block read. + int ledgerId = 1; + int entrySize = 8; + int lac = 89; + ReadHandle readHandle = new MockReadHandle(ledgerId, entrySize, lac); + + // set block size equals to (header + lac_entry) size. 
+ int blockSize = DataBlockHeaderImpl.getDataStartOffset() + (1 + lac) * (entrySize + 4 + 8); + BlockAwareSegmentInputStreamImpl inputStream = new BlockAwareSegmentInputStreamImpl(readHandle, 0, blockSize); + int expectedEntryCount = (blockSize - DataBlockHeaderImpl.getDataStartOffset()) / (entrySize + 4 + 8); + + // verify get methods + assertEquals(inputStream.getLedger(), readHandle); + assertEquals(inputStream.getStartEntryId(), 0); + assertEquals(inputStream.getBlockSize(), blockSize); + + // verify read inputStream + // 1. read header. 128 + byte headerB[] = new byte[DataBlockHeaderImpl.getDataStartOffset()]; + // read twice to test the offset not 0 case + int ret = inputStream.read(headerB, 0, 66); + assertEquals(ret, 66); + ret = inputStream.read(headerB, 66, headerB.length - 66); + assertEquals(headerB.length - 66, ret); + DataBlockHeader headerRead = DataBlockHeaderImpl.fromStream(new ByteArrayInputStream(headerB)); + assertEquals(headerRead.getBlockLength(), blockSize); + assertEquals(headerRead.getFirstEntryId(), 0); + + byte[] entryData = new byte[entrySize]; + Arrays.fill(entryData, (byte)0xB); // 0xB is MockLedgerEntry.blockPadding + + // 2. read Ledger entries. 
96 * 20 + IntStream.range(0, expectedEntryCount).forEach(i -> { + try { + byte lengthBuf[] = new byte[4]; + byte entryIdBuf[] = new byte[8]; + byte content[] = new byte[entrySize]; + + int read = inputStream.read(lengthBuf, 0, 4); + assertEquals(read, 4); + read = inputStream.read(entryIdBuf, 0, 8); + assertEquals(read, 8); + + Random random = new Random(System.currentTimeMillis()); + int o = 0; + int totalRead = 0; + int maxReadTime = 10; + while (o != content.length) { + int r; + if (maxReadTime-- == 0) { + r = entrySize - o; + } else { + r = random.nextInt(entrySize - o); + } + read = inputStream.read(content, o, r); + totalRead += read; + o += r; + } + assertEquals(totalRead, entrySize); + + assertEquals(entrySize, Ints.fromByteArray(lengthBuf)); + assertEquals(i, Longs.fromByteArray(entryIdBuf)); + assertArrayEquals(entryData, content); + } catch (Exception e) { + fail("meet exception", e); + } + }); + + // 3. should have no padding + int left = blockSize - DataBlockHeaderImpl.getDataStartOffset() - expectedEntryCount * (entrySize + 4 + 8); + assertEquals(left, 0); + assertEquals(inputStream.getBlockSize(), inputStream.getDataBlockFullOffset()); + + // 4. reach end. 
+ byte[] b = new byte[4]; + ret = inputStream.read(b, 0, 4); + assertEquals(ret, -1); + + assertEquals(inputStream.getBlockEntryCount(), expectedEntryCount); + assertEquals(inputStream.getBlockEntryBytesCount(), entrySize * expectedEntryCount); + assertEquals(inputStream.getEndEntryId(), expectedEntryCount - 1); + + inputStream.close(); + } + + @Test + public void testCloseReleaseResources() throws Exception { + ReadHandle readHandle = new MockReadHandle(1, 10, 10); + + BlockAwareSegmentInputStreamImpl inputStream = new BlockAwareSegmentInputStreamImpl(readHandle, 0, 1024); + inputStream.read(); + Field field = BlockAwareSegmentInputStreamImpl.class.getDeclaredField("paddingBuf"); + field.setAccessible(true); + ByteBuf paddingBuf = (ByteBuf) field.get(inputStream); + assertEquals(1, paddingBuf.refCnt()); + inputStream.close(); + assertEquals(0, paddingBuf.refCnt()); + } } diff --git a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BufferedOffloadStreamTest.java b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BufferedOffloadStreamTest.java index 4ebf9434952eb..a5dc7ba17076f 100644 --- a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BufferedOffloadStreamTest.java +++ b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/BufferedOffloadStreamTest.java @@ -42,7 +42,7 @@ public class BufferedOffloadStreamTest { final Random random = new Random(); - public void testWithPadding(int paddingLen) throws Exception { + private void testWithPadding(int paddingLen) throws Exception { int blockSize = StreamingDataBlockHeaderImpl.getDataStartOffset(); List entryBuffer = new LinkedList<>(); final UUID uuid = UUID.randomUUID(); diff --git a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/MockManagedLedger.java 
b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/MockManagedLedger.java index 767190c8624aa..907aba67b9ef3 100644 --- a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/MockManagedLedger.java +++ b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/impl/MockManagedLedger.java @@ -122,8 +122,8 @@ public ManagedCursor newNonDurableCursor(Position startPosition, String subscrip @Override public ManagedCursor newNonDurableCursor(Position startPosition, String subscriptionName, - CommandSubscribe.InitialPosition initialPosition) throws - ManagedLedgerException { + CommandSubscribe.InitialPosition initialPosition, + boolean isReadCompacted) throws ManagedLedgerException { return null; } @@ -137,6 +137,11 @@ public void deleteCursor(String name) throws InterruptedException, ManagedLedger } + @Override + public void removeWaitingCursor(ManagedCursor cursor) { + + } + @Override public void asyncOpenCursor(String name, AsyncCallbacks.OpenCursorCallback callback, Object ctx) { diff --git a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/JCloudBlobStoreProviderTests.java b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/JCloudBlobStoreProviderTests.java index 28e5829ba2a5e..4f0c60bc00708 100644 --- a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/JCloudBlobStoreProviderTests.java +++ b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/JCloudBlobStoreProviderTests.java @@ -23,8 +23,6 @@ import java.util.HashMap; import java.util.Map; -import org.apache.bookkeeper.mledger.offload.jcloud.provider.JCloudBlobStoreProvider; -import org.apache.bookkeeper.mledger.offload.jcloud.provider.TieredStorageConfiguration; import org.testng.annotations.Test; public class JCloudBlobStoreProviderTests { @@ -105,4 +103,33 @@ 
public void transientValidationFailureTest() { config = new TieredStorageConfiguration(map); JCloudBlobStoreProvider.TRANSIENT.validate(config); } + + @Test() + public void s3ValidationTest() { + Map map = new HashMap<>(); + map.put("managedLedgerOffloadDriver", "S3"); + map.put("managedLedgerOffloadServiceEndpoint", "http://s3.service"); + map.put("managedLedgerOffloadBucket", "test-s3-bucket"); + TieredStorageConfiguration configuration = new TieredStorageConfiguration(map); + configuration.getProvider().validate(configuration); + } + + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "ServiceEndpoint must specified for S3 offload") + public void s3ValidationServiceEndpointMissed() { + Map map = new HashMap<>(); + map.put("managedLedgerOffloadDriver", "S3"); + TieredStorageConfiguration configuration = new TieredStorageConfiguration(map); + configuration.getProvider().validate(configuration); + } + + @Test(expectedExceptions = IllegalArgumentException.class, + expectedExceptionsMessageRegExp = "Bucket cannot be empty for S3 offload") + public void s3ValidationBucketMissed() { + Map map = new HashMap<>(); + map.put("managedLedgerOffloadDriver", "S3"); + map.put("managedLedgerOffloadServiceEndpoint", "http://s3.service"); + TieredStorageConfiguration configuration = new TieredStorageConfiguration(map); + configuration.getProvider().validate(configuration); + } } diff --git a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/TieredStorageConfigurationTests.java b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/TieredStorageConfigurationTests.java index f80f3ceaa1aa8..8370fb9580496 100644 --- a/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/TieredStorageConfigurationTests.java +++ 
b/tiered-storage/jcloud/src/test/java/org/apache/bookkeeper/mledger/offload/jcloud/provider/TieredStorageConfigurationTests.java @@ -22,6 +22,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Properties; + import org.jclouds.domain.Credentials; import org.testng.annotations.Test; @@ -127,19 +129,21 @@ public final void awsS3CredsProviderTest() { // set the aws properties with fake creds so the defaultProviderChain works System.setProperty("aws.accessKeyId", "fakeid1"); System.setProperty("aws.secretKey", "fakekey1"); - Credentials creds1 = config.getProviderCredentials().get(); - assertEquals(creds1.identity, "fakeid1"); - assertEquals(creds1.credential, "fakekey1"); - - // reset the properties and ensure we get different values by re-evaluating the chain - System.setProperty("aws.accessKeyId", "fakeid2"); - System.setProperty("aws.secretKey", "fakekey2"); - Credentials creds2 = config.getProviderCredentials().get(); - assertEquals(creds2.identity, "fakeid2"); - assertEquals(creds2.credential, "fakekey2"); + try { + Credentials creds1 = config.getProviderCredentials().get(); + assertEquals(creds1.identity, "fakeid1"); + assertEquals(creds1.credential, "fakekey1"); - System.clearProperty("aws.accessKeyId"); - System.clearProperty("aws.secretKey"); + // reset the properties and ensure we get different values by re-evaluating the chain + System.setProperty("aws.accessKeyId", "fakeid2"); + System.setProperty("aws.secretKey", "fakekey2"); + Credentials creds2 = config.getProviderCredentials().get(); + assertEquals(creds2.identity, "fakeid2"); + assertEquals(creds2.credential, "fakekey2"); + } finally { + System.clearProperty("aws.accessKeyId"); + System.clearProperty("aws.secretKey"); + } } /** @@ -205,4 +209,23 @@ public final void gcsBackwardCompatiblePropertiesTest() { assertEquals(config.getMaxBlockSizeInBytes(), new Integer(12)); assertEquals(config.getReadBufferSizeInBytes(), new Integer(500)); } + + @Test + public void 
overridePropertiesTest() { + Map map = new HashMap<>(); + map.put("s3ManagedLedgerOffloadServiceEndpoint", "http://localhost"); + map.put("s3ManagedLedgerOffloadRegion", "my-region"); + System.setProperty("jclouds.SystemPropertyA", "A"); + System.setProperty("jclouds.region", "jclouds-region"); + try { + TieredStorageConfiguration config = new TieredStorageConfiguration(map); + Properties properties = config.getOverrides(); + assertEquals(properties.get("jclouds.region"), "jclouds-region"); + assertEquals(config.getServiceEndpoint(), "http://localhost"); + assertEquals(properties.get("jclouds.SystemPropertyA"), "A"); + } finally { + System.clearProperty("jclouds.SystemPropertyA"); + System.clearProperty("jclouds.region"); + } + } } diff --git a/tiered-storage/pom.xml b/tiered-storage/pom.xml index fb04ef4c4805c..ea9dd883fa004 100644 --- a/tiered-storage/pom.xml +++ b/tiered-storage/pom.xml @@ -25,7 +25,7 @@ org.apache.pulsar pulsar - 2.9.0-SNAPSHOT + 2.9.3 ..