diff --git a/.github/.OwlBot-hermetic.yaml b/.github/.OwlBot-hermetic.yaml index 6e219ca0dd..13f6c0ab22 100644 --- a/.github/.OwlBot-hermetic.yaml +++ b/.github/.OwlBot-hermetic.yaml @@ -86,9 +86,9 @@ deep-preserve-regex: - "/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java" - "/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java" - "/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPool.java" -- "/owl-bot-staging/.*/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/" -- "/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/" - "/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Singletons.java" +- "/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/reflect-config.json" +- "/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/resource-config.json" deep-copy-regex: - source: "/google/cloud/bigquery/storage/(v.*)/.*-java/proto-google-.*/src" diff --git a/.github/release-please.yml b/.github/release-please.yml index 615a27f0bf..45bcbc4d33 100644 --- a/.github/release-please.yml +++ b/.github/release-please.yml @@ -2,39 +2,22 @@ bumpMinorPreMajor: true handleGHRelease: true releaseType: java-yoshi branches: - - bumpMinorPreMajor: true - handleGHRelease: true - releaseType: java-yoshi - branch: java7 - - bumpMinorPreMajor: true - handleGHRelease: true - releaseType: java-backport - branch: 2.4.x - - bumpMinorPreMajor: true - handleGHRelease: true - releaseType: java-backport - branch: 2.12.x - - bumpMinorPreMajor: true - handleGHRelease: true - releaseType: java-backport - branch: 2.25.x - - bumpMinorPreMajor: true - handleGHRelease: true - releaseType: java-backport - branch: 2.38.x - - bumpMinorPreMajor: true - handleGHRelease: true 
- releaseType: java-backport - branch: 2.47.x - - bumpMinorPreMajor: true - handleGHRelease: true - releaseType: java-backport - branch: 3.5.x - - bumpMinorPreMajor: true - handleGHRelease: true - releaseType: java-backport - branch: 3.11.x - - bumpMinorPreMajor: true - handleGHRelease: true - releaseType: java-backport - branch: 3.15.x + - branch: java7 + - branch: 2.4.x + releaseType: java-backport + - branch: 2.12.x + releaseType: java-backport + - branch: 2.25.x + releaseType: java-backport + - branch: 2.38.x + releaseType: java-backport + - branch: 2.47.x + releaseType: java-backport + - branch: 3.5.x + releaseType: java-backport + - branch: 3.11.x + releaseType: java-backport + - branch: 3.15.x + releaseType: java-backport + - branch: protobuf-4.x-rc + manifest: true diff --git a/.github/scripts/update_generation_config.sh b/.github/scripts/update_generation_config.sh index 92efcf8819..74d0e6cc41 100644 --- a/.github/scripts/update_generation_config.sh +++ b/.github/scripts/update_generation_config.sh @@ -18,7 +18,17 @@ function get_latest_released_version() { group_id_url_path="$(sed 's|\.|/|g' <<< "${group_id}")" url="https://repo1.maven.org/maven2/${group_id_url_path}/${artifact_id}/maven-metadata.xml" xml_content=$(curl -s --fail "${url}") - latest=$(xmllint --xpath 'metadata/versioning/latest/text()' - <<< "${xml_content}") + + # 1. Extract all version tags + # 2. Strip the XML tags to leave just the version numbers + # 3. Filter for strictly numbers.numbers.numbers (e.g., 2.54.0) + # 4. Sort by version (V) and take the last one (tail -n 1) + latest=$(echo "${xml_content}" \ + | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' \ + | sed -E 's/<[^>]+>//g' \ + | sort -V \ + | tail -n 1) + if [[ -z "${latest}" ]]; then echo "The latest version of ${group_id}:${artifact_id} is empty." 
echo "The returned json from maven.org is invalid: ${json_content}" diff --git a/.github/workflows/hermetic_library_generation.yaml b/.github/workflows/hermetic_library_generation.yaml index 31a38fbb06..1716ff0b49 100644 --- a/.github/workflows/hermetic_library_generation.yaml +++ b/.github/workflows/hermetic_library_generation.yaml @@ -37,7 +37,7 @@ jobs: with: fetch-depth: 0 token: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }} - - uses: googleapis/sdk-platform-java/.github/scripts@v2.64.1 + - uses: googleapis/sdk-platform-java/.github/scripts@v2.65.1 if: env.SHOULD_RUN == 'true' with: base_ref: ${{ github.base_ref }} diff --git a/.github/workflows/unmanaged_dependency_check.yaml b/.github/workflows/unmanaged_dependency_check.yaml index fc7f576f62..fb15b88a77 100644 --- a/.github/workflows/unmanaged_dependency_check.yaml +++ b/.github/workflows/unmanaged_dependency_check.yaml @@ -17,6 +17,6 @@ jobs: # repository .kokoro/build.sh - name: Unmanaged dependency check - uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v3.54.1 + uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v3.55.1 with: bom-path: google-cloud-bigquerystorage-bom/pom.xml diff --git a/.kokoro/continuous/graalvm-native-a.cfg b/.kokoro/continuous/graalvm-native-a.cfg index b772eac66c..623fedf204 100644 --- a/.kokoro/continuous/graalvm-native-a.cfg +++ b/.kokoro/continuous/graalvm-native-a.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. 
env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.54.1" # {x-version-update:google-cloud-shared-dependencies:current} + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.55.1" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { diff --git a/.kokoro/continuous/graalvm-native-b.cfg b/.kokoro/continuous/graalvm-native-b.cfg index baf136cf82..bee69fa91d 100644 --- a/.kokoro/continuous/graalvm-native-b.cfg +++ b/.kokoro/continuous/graalvm-native-b.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.54.1" # {x-version-update:google-cloud-shared-dependencies:current} + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.55.1" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { diff --git a/.kokoro/continuous/graalvm-native-c.cfg b/.kokoro/continuous/graalvm-native-c.cfg index 2fb2fc87c4..b742f6ec44 100644 --- a/.kokoro/continuous/graalvm-native-c.cfg +++ b/.kokoro/continuous/graalvm-native-c.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_c:3.54.1" # {x-version-update:google-cloud-shared-dependencies:current} + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_c:3.55.1" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { diff --git a/.kokoro/presubmit/graalvm-native-a.cfg b/.kokoro/presubmit/graalvm-native-a.cfg index 0d98de5094..a2798bba2c 100644 --- a/.kokoro/presubmit/graalvm-native-a.cfg +++ b/.kokoro/presubmit/graalvm-native-a.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. 
env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.54.1" + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.55.1" } env_vars: { diff --git a/.kokoro/presubmit/graalvm-native-b.cfg b/.kokoro/presubmit/graalvm-native-b.cfg index c270bff717..1f804ecee9 100644 --- a/.kokoro/presubmit/graalvm-native-b.cfg +++ b/.kokoro/presubmit/graalvm-native-b.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.54.1" + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.55.1" } env_vars: { diff --git a/.kokoro/presubmit/graalvm-native-c.cfg b/.kokoro/presubmit/graalvm-native-c.cfg index 720f8bcfaf..641a3fb1db 100644 --- a/.kokoro/presubmit/graalvm-native-c.cfg +++ b/.kokoro/presubmit/graalvm-native-c.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_c:3.54.1" + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_c:3.55.1" } env_vars: { diff --git a/CHANGELOG.md b/CHANGELOG.md index f64defd9d1..8601208489 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,37 @@ # Changelog +## [3.19.1](https://github.com/googleapis/java-bigquerystorage/compare/v3.19.0...v3.19.1) (2026-01-15) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.65.1 ([4b9ce88](https://github.com/googleapis/java-bigquerystorage/commit/4b9ce887eb275403e0472f3561bc4abd2d2053b8)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-bigquery to v2.57.1 ([#3060](https://github.com/googleapis/java-bigquerystorage/issues/3060)) ([3a82884](https://github.com/googleapis/java-bigquerystorage/commit/3a828848626d62dde6f89d9ef4bc781bda2b4c5e)) +* Update dependency com.google.cloud:sdk-platform-java-config to 
v3.55.1 ([#3173](https://github.com/googleapis/java-bigquerystorage/issues/3173)) ([fc161f2](https://github.com/googleapis/java-bigquerystorage/commit/fc161f230eef41c9458dcdc9ab95e9549e5134b8)) +* Update googleapis/sdk-platform-java action to v2.65.1 ([#3172](https://github.com/googleapis/java-bigquerystorage/issues/3172)) ([fdd8e4f](https://github.com/googleapis/java-bigquerystorage/commit/fdd8e4f2252dcca9aa31c6169a77f0c27f6ff554)) + + +### Documentation + +* Add samples for using timestamps with BQStorage Read and Write API ([#3167](https://github.com/googleapis/java-bigquerystorage/issues/3167)) ([332736e](https://github.com/googleapis/java-bigquerystorage/commit/332736e8a2e9569163838d266ff49897486b9c3a)) + +## [3.19.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.18.0...v3.19.0) (2025-12-12) + + +### Features + +* Add picosecond timestamp support for Json to Proto converter ([#3131](https://github.com/googleapis/java-bigquerystorage/issues/3131)) ([ea1bcc5](https://github.com/googleapis/java-bigquerystorage/commit/ea1bcc509b7c430f92c5764cc4121aaa282255bf)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.54.2 ([#3137](https://github.com/googleapis/java-bigquerystorage/issues/3137)) ([2dc42c7](https://github.com/googleapis/java-bigquerystorage/commit/2dc42c7a6a15c4b86f7012a0cd211ca09ffd9a0e)) +* Update googleapis/sdk-platform-java action to v2.64.2 ([#3138](https://github.com/googleapis/java-bigquerystorage/issues/3138)) ([28cbdd9](https://github.com/googleapis/java-bigquerystorage/commit/28cbdd9f5ab36e9d7d0aa8142260dc760e50a3fb)) + ## [3.18.0](https://github.com/googleapis/java-bigquerystorage/compare/v3.17.3...v3.18.0) (2025-11-13) diff --git a/README.md b/README.md index 51ff85e255..38d533f6d0 100644 --- a/README.md +++ b/README.md @@ -56,20 +56,20 @@ If you are using Maven without the BOM, add this to your dependencies: If you are using Gradle 5.x or later, add this to your dependencies: ```Groovy 
-implementation platform('com.google.cloud:libraries-bom:26.71.0') +implementation platform('com.google.cloud:libraries-bom:26.73.0') implementation 'com.google.cloud:google-cloud-bigquerystorage' ``` If you are using Gradle without BOM, add this to your dependencies: ```Groovy -implementation 'com.google.cloud:google-cloud-bigquerystorage:3.18.0' +implementation 'com.google.cloud:google-cloud-bigquerystorage:3.19.1' ``` If you are using SBT, add this to your dependencies: ```Scala -libraryDependencies += "com.google.cloud" % "google-cloud-bigquerystorage" % "3.18.0" +libraryDependencies += "com.google.cloud" % "google-cloud-bigquerystorage" % "3.19.1" ``` ## Authentication @@ -146,6 +146,8 @@ Samples are in the [`samples/`](https://github.com/googleapis/java-bigquerystora | Export Open Telemetry | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/ExportOpenTelemetry.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/ExportOpenTelemetry.java) | | Json Writer Stream Cdc | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/JsonWriterStreamCdc.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/JsonWriterStreamCdc.java) | | Parallel Write Committed Stream | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/ParallelWriteCommittedStream.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/ParallelWriteCommittedStream.java) | +| Read Timestamp Arrow | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampArrow.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampArrow.java) | +| Read Timestamp Avro | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampAvro.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampAvro.java) | | Storage Arrow Sample | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/StorageArrowSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/StorageArrowSample.java) | | Storage Sample | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/StorageSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/StorageSample.java) | | Write Buffered Stream | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WriteBufferedStream.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteBufferedStream.java) | @@ -153,6 +155,8 @@ Samples are in the [`samples/`](https://github.com/googleapis/java-bigquerystora | Write Nested Proto | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WriteNestedProto.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteNestedProto.java) | | Write Pending Stream | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WritePendingStream.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WritePendingStream.java) | | Write To Default Stream | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStream.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStream.java) | +| Write To Default Stream Timestamp Json | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJson.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJson.java) | +| Write To Default Stream Timestamp With Arrow | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrow.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrow.java) | | Write To Default Stream With Arrow | [source code](https://github.com/googleapis/java-bigquerystorage/blob/main/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamWithArrow.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigquerystorage&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamWithArrow.java) | @@ -232,32 +236,13 @@ information. Apache 2.0 - See [LICENSE][license] for more information. 
-## CI Status - -Java Version | Status ------------- | ------ -Java 8 | [![Kokoro CI][kokoro-badge-image-2]][kokoro-badge-link-2] -Java 8 OSX | [![Kokoro CI][kokoro-badge-image-3]][kokoro-badge-link-3] -Java 8 Windows | [![Kokoro CI][kokoro-badge-image-4]][kokoro-badge-link-4] -Java 11 | [![Kokoro CI][kokoro-badge-image-5]][kokoro-badge-link-5] - Java is a registered trademark of Oracle and/or its affiliates. [product-docs]: https://cloud.google.com/bigquery/docs/reference/storage/ [javadocs]: https://cloud.google.com/java/docs/reference/google-cloud-bigquerystorage/latest/history -[kokoro-badge-image-1]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java7.svg -[kokoro-badge-link-1]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java7.html -[kokoro-badge-image-2]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java8.svg -[kokoro-badge-link-2]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java8.html -[kokoro-badge-image-3]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java8-osx.svg -[kokoro-badge-link-3]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java8-osx.html -[kokoro-badge-image-4]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java8-win.svg -[kokoro-badge-link-4]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java8-win.html -[kokoro-badge-image-5]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java11.svg -[kokoro-badge-link-5]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java11.html [stability-image]: https://img.shields.io/badge/stability-stable-green [maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-bigquerystorage.svg -[maven-version-link]: 
https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigquerystorage/3.18.0 +[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigquerystorage/3.19.1 [authentication]: https://github.com/googleapis/google-cloud-java#authentication [auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes [predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles diff --git a/generation_config.yaml b/generation_config.yaml index de2e8d18db..0cd3e41a58 100644 --- a/generation_config.yaml +++ b/generation_config.yaml @@ -1,6 +1,6 @@ -gapic_generator_version: 2.64.1 -googleapis_commitish: 5342712986262b93211b136eb4bd6fb79b3764af -libraries_bom_version: 26.71.0 +gapic_generator_version: 2.65.1 +googleapis_commitish: 415914bd49d41beaae8a9adb348ee2587c93aa70 +libraries_bom_version: 26.73.0 libraries: - api_shortname: bigquerystorage name_pretty: BigQuery Storage diff --git a/google-cloud-bigquerystorage-bom/pom.xml b/google-cloud-bigquerystorage-bom/pom.xml index 1ba0f37e23..2244f1174c 100644 --- a/google-cloud-bigquerystorage-bom/pom.xml +++ b/google-cloud-bigquerystorage-bom/pom.xml @@ -3,12 +3,12 @@ 4.0.0 com.google.cloud google-cloud-bigquerystorage-bom - 3.18.0 + 3.19.1 pom com.google.cloud sdk-platform-java-config - 3.54.1 + 3.55.1 Google Cloud bigquerystorage BOM @@ -52,57 +52,57 @@ com.google.cloud google-cloud-bigquerystorage - 3.18.0 + 3.19.1 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 - 0.190.0 + 0.191.1 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta2 - 0.190.0 + 0.191.1 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1 - 3.18.0 + 3.19.1 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1alpha - 3.18.0 + 3.19.1 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta - 3.18.0 + 3.19.1 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 - 0.190.0 + 0.191.1 com.google.api.grpc 
proto-google-cloud-bigquerystorage-v1beta2 - 0.190.0 + 0.191.1 com.google.api.grpc proto-google-cloud-bigquerystorage-v1 - 3.18.0 + 3.19.1 com.google.api.grpc proto-google-cloud-bigquerystorage-v1alpha - 3.18.0 + 3.19.1 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta - 3.18.0 + 3.19.1 diff --git a/google-cloud-bigquerystorage/pom.xml b/google-cloud-bigquerystorage/pom.xml index ea36c08bda..089b48d3e8 100644 --- a/google-cloud-bigquerystorage/pom.xml +++ b/google-cloud-bigquerystorage/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.google.cloud google-cloud-bigquerystorage - 3.18.0 + 3.19.1 jar BigQuery Storage https://github.com/googleapis/java-bigquerystorage @@ -11,7 +11,7 @@ com.google.cloud google-cloud-bigquerystorage-parent - 3.18.0 + 3.19.1 google-cloud-bigquerystorage @@ -39,6 +39,20 @@ + + + + org.apache.maven.plugins + maven-failsafe-plugin + 3.5.2 + + + org.apache.maven.surefire + surefire-junit-platform + ${surefire.version} + + + org.xolstice.maven.plugins protobuf-maven-plugin @@ -73,10 +87,6 @@ io.grpc grpc-protobuf - - io.grpc - grpc-util - com.google.api api-common @@ -197,6 +207,23 @@ junit test + + org.junit.jupiter + junit-jupiter-api + test + + + org.junit.jupiter + junit-jupiter-engine + test + + + + + org.junit.vintage + junit-vintage-engine + test + com.google.http-client google-http-client @@ -232,10 +259,9 @@ arrow-memory-core test - - io.grpc - grpc-testing + com.google.protobuf + protobuf-java-util test @@ -327,6 +353,10 @@ opentest4j ${opentest4j.version} + + org.junit.jupiter + junit-jupiter-engine + org.junit.vintage junit-vintage-engine @@ -340,6 +370,11 @@ maven-surefire-plugin ${surefire.version} + + org.junit.jupiter + junit-jupiter-engine + ${junit-vintage-engine.version} + org.junit.vintage junit-vintage-engine diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptor.java 
b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptor.java index 60bb739b23..5842f6d068 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptor.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptor.java @@ -30,6 +30,8 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.logging.Logger; /** * Converts a BQ table schema to protobuf descriptor. All field names will be converted to lowercase @@ -37,15 +39,18 @@ * shown in the ImmutableMaps below. */ public class BQTableSchemaToProtoDescriptor { - private static ImmutableMap - BQTableSchemaModeMap = - ImmutableMap.of( - TableFieldSchema.Mode.NULLABLE, FieldDescriptorProto.Label.LABEL_OPTIONAL, - TableFieldSchema.Mode.REPEATED, FieldDescriptorProto.Label.LABEL_REPEATED, - TableFieldSchema.Mode.REQUIRED, FieldDescriptorProto.Label.LABEL_REQUIRED); - private static ImmutableMap - BQTableSchemaTypeMap = + private static final Logger LOG = + Logger.getLogger(BQTableSchemaToProtoDescriptor.class.getName()); + + private static Map DEFAULT_BQ_TABLE_SCHEMA_MODE_MAP = + ImmutableMap.of( + TableFieldSchema.Mode.NULLABLE, FieldDescriptorProto.Label.LABEL_OPTIONAL, + TableFieldSchema.Mode.REPEATED, FieldDescriptorProto.Label.LABEL_REPEATED, + TableFieldSchema.Mode.REQUIRED, FieldDescriptorProto.Label.LABEL_REQUIRED); + + private static Map + DEFAULT_BQ_TABLE_SCHEMA_TYPE_MAP = new ImmutableMap.Builder() .put(TableFieldSchema.Type.BOOL, FieldDescriptorProto.Type.TYPE_BOOL) .put(TableFieldSchema.Type.BYTES, FieldDescriptorProto.Type.TYPE_BYTES) @@ -142,11 +147,13 @@ private static Descriptor convertBQTableSchemaToProtoDescriptorImpl( .setType(BQTableField.getRangeElementType().getType()) .setName("start") .setMode(Mode.NULLABLE) + 
.setTimestampPrecision(BQTableField.getTimestampPrecision()) .build(), TableFieldSchema.newBuilder() .setType(BQTableField.getRangeElementType().getType()) .setName("end") .setMode(Mode.NULLABLE) + .setTimestampPrecision(BQTableField.getTimestampPrecision()) .build()); if (dependencyMap.containsKey(rangeFields)) { @@ -189,7 +196,7 @@ private static Descriptor convertBQTableSchemaToProtoDescriptorImpl( * @param index Index for protobuf fields. * @param scope used to name descriptors */ - private static FieldDescriptorProto convertBQTableFieldToProtoField( + static FieldDescriptorProto convertBQTableFieldToProtoField( TableFieldSchema BQTableField, int index, String scope) { TableFieldSchema.Mode mode = BQTableField.getMode(); String fieldName = BQTableField.getName().toLowerCase(); @@ -198,7 +205,7 @@ private static FieldDescriptorProto convertBQTableFieldToProtoField( FieldDescriptorProto.newBuilder() .setName(fieldName) .setNumber(index) - .setLabel((FieldDescriptorProto.Label) BQTableSchemaModeMap.get(mode)); + .setLabel((FieldDescriptorProto.Label) DEFAULT_BQ_TABLE_SCHEMA_MODE_MAP.get(mode)); switch (BQTableField.getType()) { case STRUCT: @@ -206,12 +213,37 @@ private static FieldDescriptorProto convertBQTableFieldToProtoField( break; case RANGE: fieldDescriptor.setType( - (FieldDescriptorProto.Type) BQTableSchemaTypeMap.get(BQTableField.getType())); + (FieldDescriptorProto.Type) + DEFAULT_BQ_TABLE_SCHEMA_TYPE_MAP.get(BQTableField.getType())); fieldDescriptor.setTypeName(scope); break; + case TIMESTAMP: + // Can map to either int64 or string based on the BQ Field's timestamp precision + // Default: microsecond (6) maps to int64 and picosecond (12) maps to string. + long timestampPrecision = BQTableField.getTimestampPrecision().getValue(); + if (timestampPrecision == 12L) { + fieldDescriptor.setType( + (FieldDescriptorProto.Type) FieldDescriptorProto.Type.TYPE_STRING); + break; + } + // This should never happen as this is a server response issue. 
If this is the case, + // warn the user and use INT64 as the default is microsecond precision. + if (timestampPrecision != 6L && timestampPrecision != 0L) { + LOG.warning( + "BigQuery Timestamp field " + + BQTableField.getName() + + " has timestamp precision that is not 6 or 12. Defaulting to microsecond" + + " precision and mapping to INT64 protobuf type."); + } + // If the timestampPrecision value comes back as a null result from the server, + // timestampPrecision has a value of 0L. Use the INT64 to map to the type used + // for the default precision (microsecond). + fieldDescriptor.setType((FieldDescriptorProto.Type) FieldDescriptorProto.Type.TYPE_INT64); + break; default: fieldDescriptor.setType( - (FieldDescriptorProto.Type) BQTableSchemaTypeMap.get(BQTableField.getType())); + (FieldDescriptorProto.Type) + DEFAULT_BQ_TABLE_SCHEMA_TYPE_MAP.get(BQTableField.getType())); break; } diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClient.java index c1f4ade83b..05f482e8bb 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClient.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClient.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadSettings.java index 3309698726..a224174313 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -79,8 +79,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. */ @Generated("by gapic-generator-java") public class BaseBigQueryReadSettings extends ClientSettings { diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClient.java index eee0de8958..916f7ef30c 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClient.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClient.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteSettings.java index 5aae85edd3..2f7b439c1a 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -78,8 +78,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. 
*/ @Generated("by gapic-generator-java") public class BigQueryWriteSettings extends ClientSettings { diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java index 9a4fecf780..6e5643f002 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java @@ -15,8 +15,14 @@ */ package com.google.cloud.bigquery.storage.v1; +import static java.time.temporal.ChronoField.HOUR_OF_DAY; +import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; +import static java.time.temporal.ChronoField.NANO_OF_SECOND; +import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; + import com.google.api.pathtemplate.ValidationException; import com.google.cloud.bigquery.storage.v1.Exceptions.RowIndexToErrorException; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.primitives.Doubles; @@ -26,15 +32,18 @@ import com.google.protobuf.Descriptors.Descriptor; import com.google.protobuf.Descriptors.FieldDescriptor; import com.google.protobuf.DynamicMessage; +import com.google.protobuf.Timestamp; import com.google.protobuf.UninitializedMessageException; import java.math.BigDecimal; import java.math.RoundingMode; +import java.time.Instant; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; import java.time.ZoneOffset; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; +import java.time.format.DateTimeParseException; import java.time.format.TextStyle; import java.time.temporal.ChronoField; import java.time.temporal.TemporalAccessor; @@ -42,6 +51,8 @@ import 
java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; @@ -63,7 +74,31 @@ public class JsonToProtoMessage implements ToProtoConverter { .put(FieldDescriptor.Type.STRING, "string") .put(FieldDescriptor.Type.MESSAGE, "object") .build(); - private static final DateTimeFormatter TIMESTAMP_FORMATTER = + + private static final DateTimeFormatter TO_TIMESTAMP_FORMATTER = + new DateTimeFormatterBuilder() + .parseLenient() + .append(DateTimeFormatter.ISO_LOCAL_DATE) + .optionalStart() + .appendLiteral('T') + .optionalEnd() + .appendValue(HOUR_OF_DAY, 2) + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2) + .optionalEnd() + .optionalStart() + .appendFraction(NANO_OF_SECOND, 6, 9, true) + .optionalEnd() + .optionalStart() + .appendOffset("+HHMM", "+00:00") + .optionalEnd() + .toFormatter() + .withZone(ZoneOffset.UTC); + + private static final DateTimeFormatter FROM_TIMESTAMP_FORMATTER = new DateTimeFormatterBuilder() .parseLenient() .append(DateTimeFormatter.ofPattern("yyyy[/][-]MM[/][-]dd")) @@ -120,6 +155,14 @@ public class JsonToProtoMessage implements ToProtoConverter { .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) .toFormatter(); + // Regex to identify >9 digits in the fraction part (e.g. 
`.123456789123`) + // Matches the dot, followed by 10+ digits (fractional part), followed by non-digits (like `+00`) + // or end of string + private static final Pattern ISO8601_TIMESTAMP_HIGH_PRECISION_PATTERN = + Pattern.compile("\\.(\\d{10,})(?:\\D|$)"); + private static final long MICROS_PER_SECOND = 1_000_000; + private static final int NANOS_PER_MICRO = 1_000; + /** You can use {@link #INSTANCE} instead */ public JsonToProtoMessage() {} @@ -620,25 +663,8 @@ private void fillField( return; } } else if (fieldSchema.getType() == TableFieldSchema.Type.TIMESTAMP) { - if (val instanceof String) { - Double parsed = Doubles.tryParse((String) val); - if (parsed != null) { - protoMsg.setField(fieldDescriptor, parsed.longValue()); - return; - } - TemporalAccessor parsedTime = TIMESTAMP_FORMATTER.parse((String) val); - protoMsg.setField( - fieldDescriptor, - parsedTime.getLong(ChronoField.INSTANT_SECONDS) * 1000000 - + parsedTime.getLong(ChronoField.MICRO_OF_SECOND)); - return; - } else if (val instanceof Long) { - protoMsg.setField(fieldDescriptor, val); - return; - } else if (val instanceof Integer) { - protoMsg.setField(fieldDescriptor, Long.valueOf((Integer) val)); - return; - } + protoMsg.setField(fieldDescriptor, getTimestampAsLong(val)); + return; } } if (val instanceof Integer) { @@ -685,6 +711,14 @@ private void fillField( } break; case STRING: + // Timestamp fields will be transmitted as a String if BQ's timestamp field is + // enabled to support picosecond. Check that the schema's field is timestamp before + // proceeding with the rest of the logic. Converts the supported types into a String. 
+ // Supported types: https://docs.cloud.google.com/bigquery/docs/supported-data-types + if (fieldSchema != null && fieldSchema.getType() == TableFieldSchema.Type.TIMESTAMP) { + protoMsg.setField(fieldDescriptor, getTimestampAsString(val)); + return; + } if (val instanceof String) { protoMsg.setField(fieldDescriptor, val); return; @@ -897,24 +931,7 @@ private void fillRepeatedField( } } else if (fieldSchema != null && fieldSchema.getType() == TableFieldSchema.Type.TIMESTAMP) { - if (val instanceof String) { - Double parsed = Doubles.tryParse((String) val); - if (parsed != null) { - protoMsg.addRepeatedField(fieldDescriptor, parsed.longValue()); - } else { - TemporalAccessor parsedTime = TIMESTAMP_FORMATTER.parse((String) val); - protoMsg.addRepeatedField( - fieldDescriptor, - parsedTime.getLong(ChronoField.INSTANT_SECONDS) * 1000000 - + parsedTime.getLong(ChronoField.MICRO_OF_SECOND)); - } - } else if (val instanceof Long) { - protoMsg.addRepeatedField(fieldDescriptor, val); - } else if (val instanceof Integer) { - protoMsg.addRepeatedField(fieldDescriptor, Long.valueOf((Integer) val)); - } else { - throwWrongFieldType(fieldDescriptor, currentScope, index); - } + protoMsg.addRepeatedField(fieldDescriptor, getTimestampAsLong(val)); } else if (val instanceof Integer) { protoMsg.addRepeatedField(fieldDescriptor, Long.valueOf((Integer) val)); } else if (val instanceof Long) { @@ -958,6 +975,14 @@ private void fillRepeatedField( } break; case STRING: + // Timestamp fields will be transmitted as a String if BQ's timestamp field is + // enabled to support picosecond. Check that the schema's field is timestamp before + // proceeding with the rest of the logic. Converts the supported types into a String. 
+ // Supported types: https://docs.cloud.google.com/bigquery/docs/supported-data-types + if (fieldSchema != null && fieldSchema.getType() == TableFieldSchema.Type.TIMESTAMP) { + protoMsg.addRepeatedField(fieldDescriptor, getTimestampAsString(val)); + return; + } if (val instanceof String) { protoMsg.addRepeatedField(fieldDescriptor, val); } else if (val instanceof Short @@ -1002,6 +1027,76 @@ private void fillRepeatedField( } } + /** + * Converts microseconds from epoch to a Java Instant. + * + * @param micros the number of microseconds from 1970-01-01T00:00:00Z + * @return the Instant corresponding to the microseconds + */ + @VisibleForTesting + static Instant fromEpochMicros(long micros) { + long seconds = Math.floorDiv(micros, MICROS_PER_SECOND); + int nanos = (int) Math.floorMod(micros, MICROS_PER_SECOND) * NANOS_PER_MICRO; + + return Instant.ofEpochSecond(seconds, nanos); + } + + /** + * Best effort to try and convert a timestamp to an ISO8601 string. Standardize the timestamp + * output to be ISO_DATE_TIME (e.g. 2011-12-03T10:15:30+01:00) for timestamps up to nanosecond + * precision. For higher precision, the ISO8601 input is used as long as it is valid. + */ + @VisibleForTesting + static String getTimestampAsString(Object val) { + if (val instanceof String) { + String value = (String) val; + Double parsed = Doubles.tryParse(value); + // If true, it was a numeric value inside a String + if (parsed != null) { + return getTimestampAsString(parsed.longValue()); + } + // Validate the ISO8601 values before sending it to the server. + validateTimestamp(value); + + // If it's high precision (more than 9 digits), then return the ISO8601 string as-is + // as JDK does not have a DateTimeFormatter that supports more than nanosecond precision. 
+ Matcher matcher = ISO8601_TIMESTAMP_HIGH_PRECISION_PATTERN.matcher(value); + if (matcher.find()) { + return value; + } + // Otherwise, output the timestamp to a standard format before sending it to BQ + Instant instant = FROM_TIMESTAMP_FORMATTER.parse(value, Instant::from); + return TO_TIMESTAMP_FORMATTER.format(instant); + } else if (val instanceof Number) { + // Micros from epoch will most likely be represented as a Long, but any numeric + // value can be used + Instant instant = fromEpochMicros(((Number) val).longValue()); + return TO_TIMESTAMP_FORMATTER.format(instant); + } else if (val instanceof Timestamp) { + // Convert the Protobuf timestamp class to ISO8601 string + Timestamp timestamp = (Timestamp) val; + return TO_TIMESTAMP_FORMATTER.format( + Instant.ofEpochSecond(timestamp.getSeconds(), timestamp.getNanos())); + } + throw new IllegalArgumentException("The timestamp value passed in is not from a valid type"); + } + + /* Best effort to try and convert the Object to a long (microseconds since epoch) */ + private long getTimestampAsLong(Object val) { + if (val instanceof String) { + Double parsed = Doubles.tryParse((String) val); + if (parsed != null) { + return parsed.longValue(); + } + TemporalAccessor parsedTime = FROM_TIMESTAMP_FORMATTER.parse((String) val); + return parsedTime.getLong(ChronoField.INSTANT_SECONDS) * 1000000 + + parsedTime.getLong(ChronoField.MICRO_OF_SECOND); + } else if (val instanceof Number) { + return ((Number) val).longValue(); + } + throw new IllegalArgumentException("The timestamp value passed in is not from a valid type"); + } + private static void throwWrongFieldType( FieldDescriptor fieldDescriptor, String currentScope, int index) { throw new IllegalArgumentException( @@ -1009,4 +1104,43 @@ private static void throwWrongFieldType( "JSONObject does not have a %s field at %s[%d].", FIELD_TYPE_TO_DEBUG_MESSAGE.get(fieldDescriptor.getType()), currentScope, index)); } + + /** + * Internal helper method to check that the
timestamp follows the expected String input of ISO8601 + * string. Allows the fractional portion of the timestamp to support up to 12 digits of precision + * (up to picosecond). + * + * @throws IllegalArgumentException if timestamp is invalid or exceeds picosecond precision + */ + @VisibleForTesting + static void validateTimestamp(String timestamp) { + // Check if the string has greater than nanosecond precision (>9 digits in fractional second) + Matcher matcher = ISO8601_TIMESTAMP_HIGH_PRECISION_PATTERN.matcher(timestamp); + if (matcher.find()) { + // Group 1 is the fractional second part of the ISO8601 string + String fraction = matcher.group(1); + // Pos 10-12 of the fractional second are guaranteed to be digits. The regex only + // matches the fraction section as long as they are digits. + if (fraction.length() > 12) { + throw new IllegalArgumentException( + "Fractional second portion of ISO8601 only supports up to picosecond (12 digits) in" + + " BigQuery"); + } + + // Replace the entire fractional second portion with just the nanosecond portion. 
+ // The new timestamp will be validated against the JDK's DateTimeFormatter + String truncatedFraction = fraction.substring(0, 9); + timestamp = + new StringBuilder(timestamp) + .replace(matcher.start(1), matcher.end(1), truncatedFraction) + .toString(); + } + + // It is valid as long as DateTimeFormatter doesn't throw an exception + try { + FROM_TIMESTAMP_FORMATTER.parse((String) timestamp); + } catch (DateTimeParseException e) { + throw new IllegalArgumentException(e.getMessage(), e); + } + } } diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/package-info.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/package-info.java index 828bfa0309..6120aaef9d 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/package-info.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/package-info.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStub.java index 8cbafb0a5d..4b5f2d7161 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStubSettings.java index 1bf1721b1a..eb68149d76 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStubSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStubSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -95,8 +95,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. */ @Generated("by gapic-generator-java") public class BigQueryReadStubSettings extends StubSettings { diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStub.java index e05372eac4..b962c2afb0 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java index 387dae4be4..c8f949cc3e 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -100,8 +100,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. */ @Generated("by gapic-generator-java") public class BigQueryWriteStubSettings extends StubSettings { diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadCallableFactory.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadCallableFactory.java index 634bef77d1..3127fbea87 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadCallableFactory.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadCallableFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadStub.java index 4a7c118458..2dba1b3fe4 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteCallableFactory.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteCallableFactory.java index 3c27e77cfd..929eb01577 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteCallableFactory.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteCallableFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteStub.java index ef0769c574..b8090f2e28 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryWriteStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClient.java index d868181efe..b543150c43 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClient.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClient.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceSettings.java index b5fff3a709..401b2242b8 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -82,8 +82,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. */ @BetaApi @Generated("by gapic-generator-java") diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/package-info.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/package-info.java index bceae7b189..50643a66a6 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/package-info.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/package-info.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceCallableFactory.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceCallableFactory.java index 6735fa8c04..b0a49fde53 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceCallableFactory.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceCallableFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceStub.java index 40cdbde9ce..9342331597 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/GrpcMetastorePartitionServiceStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStub.java index 3df0bad0ea..74cb15242b 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStubSettings.java index d56a7dd047..6e07970bba 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStubSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha/stub/MetastorePartitionServiceStubSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -101,8 +101,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. 
*/ @BetaApi @Generated("by gapic-generator-java") diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClient.java index 3a2ac0fc1b..773e361c95 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClient.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClient.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceSettings.java index 903e03736f..dba660ff05 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -82,8 +82,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. 
*/ @BetaApi @Generated("by gapic-generator-java") diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/package-info.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/package-info.java index 5f29e61a07..e8bba016cc 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/package-info.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/package-info.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceCallableFactory.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceCallableFactory.java index 14e48e76a8..e23f847dd5 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceCallableFactory.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceCallableFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceStub.java index 1559e7da2d..e1f32e9362 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/GrpcMetastorePartitionServiceStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStub.java index b44696d99e..db9a876a64 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStubSettings.java index b47a96b906..96cd9e15fd 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStubSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta/stub/MetastorePartitionServiceStubSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -101,8 +101,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. */ @BetaApi @Generated("by gapic-generator-java") diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClient.java index ae919a2fc8..70b26e2275 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClient.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClient.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageSettings.java index bec551df94..495c661835 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -82,8 +82,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. */ @BetaApi @Generated("by gapic-generator-java") diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/package-info.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/package-info.java index 4d8be4e723..ea7daa69dc 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/package-info.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/package-info.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStub.java index 397e2b4982..175c5be352 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStubSettings.java index 314b11e983..4203903307 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStubSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStubSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -93,8 +93,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. 
*/ @BetaApi @Generated("by gapic-generator-java") diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageCallableFactory.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageCallableFactory.java index ab4415c32d..c993ba046e 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageCallableFactory.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageCallableFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageStub.java index 1c9a4f2a0c..3434d9e47c 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClient.java index 0e8687471c..5f7328eeac 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClient.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClient.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadSettings.java index 22b58a420c..63dadc9d0a 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -80,8 +80,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. 
*/ @BetaApi @Generated("by gapic-generator-java") diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClient.java index b0dc1ce689..d9937cfb43 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClient.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClient.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteSettings.java index 30c8919f76..f26b59ffd7 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -79,8 +79,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. * * @deprecated This class is deprecated and will be removed in the next major version update. 
*/ diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/package-info.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/package-info.java index 365862ae9f..afdbb4db6b 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/package-info.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/package-info.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStub.java index 716535dd4e..bed68ab12e 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStubSettings.java index 981bdf684b..f784b748b2 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStubSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStubSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -96,8 +96,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. */ @BetaApi @Generated("by gapic-generator-java") diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStub.java index 7d848fe7d5..3961831078 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStubSettings.java index cc16d2ed0b..8bd7b46647 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStubSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryWriteStubSettings.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -101,8 +101,8 @@ * } * * Please refer to the [Client Side Retry - * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for - * additional support in setting retries. + * Guide](https://docs.cloud.google.com/java/docs/client-retries) for additional support in setting + * retries. * * @deprecated This class is deprecated and will be removed in the next major version update. */ diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadCallableFactory.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadCallableFactory.java index 564284fe30..ac801d74c4 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadCallableFactory.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadCallableFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadStub.java index 849169fdb9..5aa8a8b04c 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteCallableFactory.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteCallableFactory.java index 62c6f2cc4b..1d219f871c 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteCallableFactory.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteCallableFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteStub.java index 66abf12d98..8c2de6b90d 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteStub.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryWriteStub.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1/reflect-config.json b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1/reflect-config.json index 1d276247df..d25add38a6 100644 --- a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1/reflect-config.json +++ b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1/reflect-config.json @@ -575,6 +575,15 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions$PicosTimestampPrecision", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.cloud.bigquery.storage.v1.AvroRows", "queryAllDeclaredConstructors": true, @@ -629,6 +638,15 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.cloud.bigquery.storage.v1.AvroSerializationOptions$PicosTimestampPrecision", + 
"queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest", "queryAllDeclaredConstructors": true, @@ -2176,94 +2194,5 @@ "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true - }, - { - "name":"org.apache.arrow.memory.BaseAllocator", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.BaseAllocator$Config", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerOption", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.netty.NettyAllocationManager$1", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.netty.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.RootAllocator", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - 
"queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType$Int", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType$PrimitiveType", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true - }, - { - "name": "org.apache.arrow.vector.types.pojo.DictionaryEncoding", - "allDeclaredFields": true - }, - { - "name": "org.apache.arrow.vector.types.pojo.Field", - "allDeclaredFields": true - }, - { - "name": "org.apache.arrow.vector.types.pojo.Schema", - "allDeclaredFields": true - }, - { - "name":"io.netty.buffer.AbstractReferenceCountedByteBuf", - "fields":[{"name":"refCnt"}] } ] \ No newline at end of file diff --git a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1alpha/reflect-config.json b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1alpha/reflect-config.json index 0373f28ba4..3ebb7d9d78 100644 --- a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1alpha/reflect-config.json +++ b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1alpha/reflect-config.json @@ -1573,94 +1573,5 @@ "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true - }, - { - "name":"org.apache.arrow.memory.BaseAllocator", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.BaseAllocator$Config", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - 
"queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerOption", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.netty.NettyAllocationManager$1", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.netty.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.RootAllocator", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType$Int", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType$PrimitiveType", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true - }, - { - "name": "org.apache.arrow.vector.types.pojo.DictionaryEncoding", - "allDeclaredFields": true - }, - { - "name": "org.apache.arrow.vector.types.pojo.Field", - "allDeclaredFields": true - }, - { - 
"name": "org.apache.arrow.vector.types.pojo.Schema", - "allDeclaredFields": true - }, - { - "name":"io.netty.buffer.AbstractReferenceCountedByteBuf", - "fields":[{"name":"refCnt"}] } ] \ No newline at end of file diff --git a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1alpha/resource-config.json b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1alpha/resource-config.json deleted file mode 100644 index 2b2ee30f5f..0000000000 --- a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1alpha/resource-config.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "resources":{ - "includes":[ - { - "pattern":"\\Qorg/apache/arrow/memory/DefaultAllocationManagerFactory.class\\E" - }, - { - "pattern":"\\Qorg/apache/arrow/memory/netty/DefaultAllocationManagerFactory.class\\E" - }, - { - "pattern":"\\Qorg/apache/arrow/memory/unsafe/DefaultAllocationManagerFactory.class\\E" - } - ] - }, - "globs":[ - { - "glob": "org/apache/arrow/memory/DefaultAllocationManagerFactory.class" - }, - { - "glob": "org/apache/arrow/memory/netty/DefaultAllocationManagerFactory.class" - }, - { - "glob": "org/apache/arrow/memory/unsafe/DefaultAllocationManagerFactory.class" - } - ] -} \ No newline at end of file diff --git a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta/reflect-config.json b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta/reflect-config.json index e0e75e3cf7..e5f85989c9 100644 --- a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta/reflect-config.json +++ b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta/reflect-config.json @@ -1573,94 +1573,5 @@ "allPublicMethods": true, 
"allDeclaredClasses": true, "allPublicClasses": true - }, - { - "name":"org.apache.arrow.memory.BaseAllocator", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.BaseAllocator$Config", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerOption", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.netty.NettyAllocationManager$1", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.netty.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.RootAllocator", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType$Int", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - 
"queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType$PrimitiveType", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true - }, - { - "name": "org.apache.arrow.vector.types.pojo.DictionaryEncoding", - "allDeclaredFields": true - }, - { - "name": "org.apache.arrow.vector.types.pojo.Field", - "allDeclaredFields": true - }, - { - "name": "org.apache.arrow.vector.types.pojo.Schema", - "allDeclaredFields": true - }, - { - "name":"io.netty.buffer.AbstractReferenceCountedByteBuf", - "fields":[{"name":"refCnt"}] } ] \ No newline at end of file diff --git a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta/resource-config.json b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta/resource-config.json deleted file mode 100644 index 2b2ee30f5f..0000000000 --- a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta/resource-config.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "resources":{ - "includes":[ - { - "pattern":"\\Qorg/apache/arrow/memory/DefaultAllocationManagerFactory.class\\E" - }, - { - "pattern":"\\Qorg/apache/arrow/memory/netty/DefaultAllocationManagerFactory.class\\E" - }, - { - "pattern":"\\Qorg/apache/arrow/memory/unsafe/DefaultAllocationManagerFactory.class\\E" - } - ] - }, - "globs":[ - { - "glob": "org/apache/arrow/memory/DefaultAllocationManagerFactory.class" - }, - { - "glob": "org/apache/arrow/memory/netty/DefaultAllocationManagerFactory.class" - }, - { - "glob": "org/apache/arrow/memory/unsafe/DefaultAllocationManagerFactory.class" - } - ] -} \ No newline at end of file diff --git a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta1/reflect-config.json 
b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta1/reflect-config.json index 469ed30aae..0e0aeab81a 100644 --- a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta1/reflect-config.json +++ b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta1/reflect-config.json @@ -1591,94 +1591,5 @@ "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true - }, - { - "name":"org.apache.arrow.memory.BaseAllocator", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.BaseAllocator$Config", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerOption", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.netty.NettyAllocationManager$1", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.netty.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.RootAllocator", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - 
"name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType$Int", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType$PrimitiveType", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true - }, - { - "name": "org.apache.arrow.vector.types.pojo.DictionaryEncoding", - "allDeclaredFields": true - }, - { - "name": "org.apache.arrow.vector.types.pojo.Field", - "allDeclaredFields": true - }, - { - "name": "org.apache.arrow.vector.types.pojo.Schema", - "allDeclaredFields": true - }, - { - "name":"io.netty.buffer.AbstractReferenceCountedByteBuf", - "fields":[{"name":"refCnt"}] } ] \ No newline at end of file diff --git a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta1/resource-config.json b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta1/resource-config.json deleted file mode 100644 index 2b2ee30f5f..0000000000 --- a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta1/resource-config.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "resources":{ - "includes":[ - { - "pattern":"\\Qorg/apache/arrow/memory/DefaultAllocationManagerFactory.class\\E" - }, - { - "pattern":"\\Qorg/apache/arrow/memory/netty/DefaultAllocationManagerFactory.class\\E" - }, - { - "pattern":"\\Qorg/apache/arrow/memory/unsafe/DefaultAllocationManagerFactory.class\\E" - } - ] - }, - "globs":[ - { - "glob": 
"org/apache/arrow/memory/DefaultAllocationManagerFactory.class" - }, - { - "glob": "org/apache/arrow/memory/netty/DefaultAllocationManagerFactory.class" - }, - { - "glob": "org/apache/arrow/memory/unsafe/DefaultAllocationManagerFactory.class" - } - ] -} \ No newline at end of file diff --git a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta2/reflect-config.json b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta2/reflect-config.json index db8a47f249..8003317ef0 100644 --- a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta2/reflect-config.json +++ b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta2/reflect-config.json @@ -2059,94 +2059,5 @@ "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true - }, - { - "name":"org.apache.arrow.memory.BaseAllocator", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.BaseAllocator$Config", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerOption", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.netty.NettyAllocationManager$1", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.netty.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true 
- }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.RootAllocator", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", - "allDeclaredFields":true, - "queryAllDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType$Int", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true, - "queryAllDeclaredConstructors":true - }, - { - "name":"org.apache.arrow.vector.types.pojo.ArrowType$PrimitiveType", - "allDeclaredFields":true, - "queryAllDeclaredMethods":true - }, - { - "name": "org.apache.arrow.vector.types.pojo.DictionaryEncoding", - "allDeclaredFields": true - }, - { - "name": "org.apache.arrow.vector.types.pojo.Field", - "allDeclaredFields": true - }, - { - "name": "org.apache.arrow.vector.types.pojo.Schema", - "allDeclaredFields": true - }, - { - "name":"io.netty.buffer.AbstractReferenceCountedByteBuf", - "fields":[{"name":"refCnt"}] } ] \ No newline at end of file diff --git a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta2/resource-config.json b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta2/resource-config.json deleted file mode 100644 index 2b2ee30f5f..0000000000 --- a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1beta2/resource-config.json +++ /dev/null @@ 
-1,26 +0,0 @@ -{ - "resources":{ - "includes":[ - { - "pattern":"\\Qorg/apache/arrow/memory/DefaultAllocationManagerFactory.class\\E" - }, - { - "pattern":"\\Qorg/apache/arrow/memory/netty/DefaultAllocationManagerFactory.class\\E" - }, - { - "pattern":"\\Qorg/apache/arrow/memory/unsafe/DefaultAllocationManagerFactory.class\\E" - } - ] - }, - "globs":[ - { - "glob": "org/apache/arrow/memory/DefaultAllocationManagerFactory.class" - }, - { - "glob": "org/apache/arrow/memory/netty/DefaultAllocationManagerFactory.class" - }, - { - "glob": "org/apache/arrow/memory/unsafe/DefaultAllocationManagerFactory.class" - } - ] -} \ No newline at end of file diff --git a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/reflect-config.json b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/reflect-config.json index 4991643217..213bb0f577 100644 --- a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/reflect-config.json +++ b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/reflect-config.json @@ -21,5 +21,94 @@ "allPublicMethods": true, "allDeclaredClasses": true, "allPublicClasses": true + }, + { + "name":"org.apache.arrow.memory.BaseAllocator", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.BaseAllocator$Config", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.DefaultAllocationManagerOption", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + 
"name":"org.apache.arrow.memory.netty.NettyAllocationManager$1", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.netty.DefaultAllocationManagerFactory", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.RootAllocator", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.memory.DefaultAllocationManagerFactory", + "allDeclaredFields":true, + "queryAllDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.vector.types.pojo.ArrowType", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.vector.types.pojo.ArrowType$Int", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true + }, + { + "name":"org.apache.arrow.vector.types.pojo.ArrowType$PrimitiveType", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true + }, + { + "name": "org.apache.arrow.vector.types.pojo.DictionaryEncoding", + "allDeclaredFields": true + }, + { + "name": "org.apache.arrow.vector.types.pojo.Field", + "allDeclaredFields": true + }, + { + "name": "org.apache.arrow.vector.types.pojo.Schema", + "allDeclaredFields": true + }, + { + "name":"io.netty.buffer.AbstractReferenceCountedByteBuf", + "fields":[{"name":"refCnt"}] } ] \ No newline at end of file diff --git 
a/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1/resource-config.json b/google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/resource-config.json similarity index 100% rename from google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud.bigquery.storage.v1/resource-config.json rename to google-cloud-bigquerystorage/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-bigquerystorage/resource-config.json diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/util/ErrorsTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/util/ErrorsTest.java index 2acfbae123..ce4f8d7ac5 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/util/ErrorsTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/util/ErrorsTest.java @@ -15,25 +15,22 @@ */ package com.google.cloud.bigquery.storage.util; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import com.google.protobuf.Duration; import com.google.protobuf.Parser; import com.google.rpc.RetryInfo; import io.grpc.Metadata; import io.grpc.Status; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.jupiter.api.Test; -@RunWith(JUnit4.class) -public class ErrorsTest { +class ErrorsTest { @Test - public void testRetryableInternalForRstErrors() { + void testRetryableInternalForRstErrors() { assertTrue( 
Errors.isRetryableInternalStatus( Status.INTERNAL.withDescription( @@ -46,13 +43,13 @@ public void testRetryableInternalForRstErrors() { } @Test - public void testNonRetryableInternalError() { + void testNonRetryableInternalError() { assertFalse(Errors.isRetryableInternalStatus(Status.INTERNAL)); assertFalse(Errors.isRetryableInternalStatus(Status.INTERNAL.withDescription("Server error."))); } @Test - public void testNonRetryableOtherError() { + void testNonRetryableOtherError() { assertFalse( Errors.isRetryableInternalStatus( Status.DATA_LOSS.withDescription( @@ -60,7 +57,7 @@ public void testNonRetryableOtherError() { } @Test - public void testIsRetryableStatus() { + void testIsRetryableStatus() { Errors.IsRetryableStatusResult result = Errors.isRetryableStatus( Status.INTERNAL.withDescription( diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptorTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptorTest.java index ba845c1c12..93c10895cf 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptorTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BQTableSchemaToProtoDescriptorTest.java @@ -15,38 +15,39 @@ */ package com.google.cloud.bigquery.storage.v1; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; -import com.google.cloud.bigquery.storage.test.JsonTest.*; -import com.google.cloud.bigquery.storage.test.SchemaTest.*; +import com.google.cloud.bigquery.storage.test.JsonTest; +import com.google.cloud.bigquery.storage.test.SchemaTest; import com.google.common.collect.ImmutableMap; +import com.google.protobuf.DescriptorProtos; +import 
com.google.protobuf.Descriptors; import com.google.protobuf.Descriptors.Descriptor; import com.google.protobuf.Descriptors.FieldDescriptor; +import com.google.protobuf.Int64Value; import java.util.HashMap; import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.jupiter.api.Test; -@RunWith(JUnit4.class) -public class BQTableSchemaToProtoDescriptorTest { +class BQTableSchemaToProtoDescriptorTest { // This is a map between the TableFieldSchema.Type and the descriptor it is supposed to // produce. The produced descriptor will be used to check against the entry values here. - private static ImmutableMap - BQTableTypeToCorrectProtoDescriptorTest = - new ImmutableMap.Builder() - .put(TableFieldSchema.Type.BOOL, BoolType.getDescriptor()) - .put(TableFieldSchema.Type.BYTES, BytesType.getDescriptor()) - .put(TableFieldSchema.Type.DATE, Int32Type.getDescriptor()) - .put(TableFieldSchema.Type.DATETIME, Int64Type.getDescriptor()) - .put(TableFieldSchema.Type.DOUBLE, DoubleType.getDescriptor()) - .put(TableFieldSchema.Type.GEOGRAPHY, StringType.getDescriptor()) - .put(TableFieldSchema.Type.INT64, Int64Type.getDescriptor()) - .put(TableFieldSchema.Type.NUMERIC, BytesType.getDescriptor()) - .put(TableFieldSchema.Type.STRING, StringType.getDescriptor()) - .put(TableFieldSchema.Type.TIME, Int64Type.getDescriptor()) - .put(TableFieldSchema.Type.TIMESTAMP, Int64Type.getDescriptor()) - .build(); + private static Map BQTableTypeToCorrectProtoDescriptorTest = + new ImmutableMap.Builder() + .put(TableFieldSchema.Type.BOOL, SchemaTest.BoolType.getDescriptor()) + .put(TableFieldSchema.Type.BYTES, SchemaTest.BytesType.getDescriptor()) + .put(TableFieldSchema.Type.DATE, SchemaTest.Int32Type.getDescriptor()) + .put(TableFieldSchema.Type.DATETIME, SchemaTest.Int64Type.getDescriptor()) + .put(TableFieldSchema.Type.DOUBLE, SchemaTest.DoubleType.getDescriptor()) + .put(TableFieldSchema.Type.GEOGRAPHY, 
SchemaTest.StringType.getDescriptor()) + .put(TableFieldSchema.Type.INT64, SchemaTest.Int64Type.getDescriptor()) + .put(TableFieldSchema.Type.NUMERIC, SchemaTest.BytesType.getDescriptor()) + .put(TableFieldSchema.Type.STRING, SchemaTest.StringType.getDescriptor()) + .put(TableFieldSchema.Type.TIME, SchemaTest.Int64Type.getDescriptor()) + .put(TableFieldSchema.Type.TIMESTAMP, SchemaTest.Int64Type.getDescriptor()) + .build(); // Creates mapping from descriptor to how many times it was reused. private void mapDescriptorToCount(Descriptor descriptor, HashMap map) { @@ -64,31 +65,34 @@ private void mapDescriptorToCount(Descriptor descriptor, HashMap entry : BQTableTypeToCorrectProtoDescriptorTest.entrySet()) { final TableFieldSchema tableFieldSchema = @@ -101,12 +105,29 @@ public void testSimpleTypes() throws Exception { TableSchema.newBuilder().addFields(0, tableFieldSchema).build(); final Descriptor descriptor = BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, entry.getValue()); + assertDescriptorsAreEqual(entry.getValue(), descriptor); } } + // BQ Timestamp field with higher precision (12) is mapped to a String protobuf type (not int64) @Test - public void testRange() throws Exception { + void testTimestampType_higherTimestampPrecision() + throws Descriptors.DescriptorValidationException { + TableFieldSchema tableFieldSchema = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setTimestampPrecision(Int64Value.newBuilder().setValue(12).build()) + .setName("test_field_type") + .build(); + TableSchema tableSchema = TableSchema.newBuilder().addFields(0, tableFieldSchema).build(); + Descriptor descriptor = + BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); + assertDescriptorsAreEqual(SchemaTest.StringType.getDescriptor(), descriptor); + } + + @Test + void testRange() throws Exception { final TableSchema 
tableSchema = TableSchema.newBuilder() .addFields( @@ -172,11 +193,11 @@ public void testRange() throws Exception { .build(); final Descriptor descriptor = BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, TestRange.getDescriptor()); + assertDescriptorsAreEqual(JsonTest.TestRange.getDescriptor(), descriptor); } @Test - public void testStructSimple() throws Exception { + void testStructSimple() throws Exception { final TableFieldSchema stringType = TableFieldSchema.newBuilder() .setType(TableFieldSchema.Type.STRING) @@ -193,11 +214,11 @@ public void testStructSimple() throws Exception { final TableSchema tableSchema = TableSchema.newBuilder().addFields(0, tableFieldSchema).build(); final Descriptor descriptor = BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, MessageType.getDescriptor()); + assertDescriptorsAreEqual(SchemaTest.MessageType.getDescriptor(), descriptor); } @Test - public void testStructComplex() throws Exception { + void testStructComplex() throws Exception { final TableFieldSchema test_int = TableFieldSchema.newBuilder() .setType(TableFieldSchema.Type.INT64) @@ -387,6 +408,20 @@ public void testStructComplex() throws Exception { .setMode(TableFieldSchema.Mode.REPEATED) .setName("test_json") .build(); + final TableFieldSchema TEST_TIMESTAMP_HIGHER_PRECISION = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_timestamp_higher_precision") + .setTimestampPrecision(Int64Value.newBuilder().setValue(12).build()) + .build(); + final TableFieldSchema TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_timestamp_higher_precision_repeated") + .setTimestampPrecision(Int64Value.newBuilder().setValue(12).build()) + .build(); 
final TableSchema tableSchema = TableSchema.newBuilder() .addFields(0, test_int) @@ -420,14 +455,16 @@ public void testStructComplex() throws Exception { .addFields(28, TEST_BIGNUMERIC_DOUBLE) .addFields(29, TEST_INTERVAL) .addFields(30, TEST_JSON) + .addFields(31, TEST_TIMESTAMP_HIGHER_PRECISION) + .addFields(32, TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) .build(); final Descriptor descriptor = BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, ComplexRoot.getDescriptor()); + assertDescriptorsAreEqual(JsonTest.ComplexRoot.getDescriptor(), descriptor); } @Test - public void testCasingComplexStruct() throws Exception { + void testCasingComplexStruct() throws Exception { final TableFieldSchema required = TableFieldSchema.newBuilder() .setType(TableFieldSchema.Type.INT64) @@ -503,11 +540,11 @@ public void testCasingComplexStruct() throws Exception { .build(); final Descriptor descriptor = BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, CasingComplex.getDescriptor()); + assertDescriptorsAreEqual(JsonTest.CasingComplex.getDescriptor(), descriptor); } @Test - public void testOptions() throws Exception { + void testOptions() throws Exception { final TableFieldSchema required = TableFieldSchema.newBuilder() .setType(TableFieldSchema.Type.INT64) @@ -534,11 +571,11 @@ public void testOptions() throws Exception { .build(); final Descriptor descriptor = BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, OptionTest.getDescriptor()); + assertDescriptorsAreEqual(JsonTest.OptionTest.getDescriptor(), descriptor); } @Test - public void testDescriptorReuseDuringCreation() throws Exception { + void testDescriptorReuseDuringCreation() throws Exception { final TableFieldSchema test_int = TableFieldSchema.newBuilder() .setType(TableFieldSchema.Type.INT64) @@ -591,11 +628,11 @@ public void 
testDescriptorReuseDuringCreation() throws Exception { assertEquals(descriptorToCount.get("root__reuse_lvl1").intValue(), 3); assertTrue(descriptorToCount.containsKey("root__reuse_lvl1__reuse_lvl2")); assertEquals(descriptorToCount.get("root__reuse_lvl1__reuse_lvl2").intValue(), 3); - isDescriptorEqual(descriptor, ReuseRoot.getDescriptor()); + assertDescriptorsAreEqual(JsonTest.ReuseRoot.getDescriptor(), descriptor); } @Test - public void testNestedFlexibleFieldName() throws Exception { + void testNestedFlexibleFieldName() throws Exception { final TableFieldSchema stringField = TableFieldSchema.newBuilder() .setType(TableFieldSchema.Type.STRING) @@ -619,6 +656,69 @@ public void testNestedFlexibleFieldName() throws Exception { TableSchema.newBuilder().addFields(0, stringField).addFields(1, nestedField).build(); final Descriptor descriptor = BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, TestNestedFlexibleFieldName.getDescriptor()); + assertDescriptorsAreEqual(SchemaTest.TestNestedFlexibleFieldName.getDescriptor(), descriptor); + } + + @Test + void timestampField_defaultPrecision() throws Exception { + TableFieldSchema timestampField = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = + BQTableSchemaToProtoDescriptor.convertBQTableFieldToProtoField(timestampField, 0, null); + assertEquals( + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, fieldDescriptorProto.getType()); + } + + @Test + void timestampField_picosecondPrecision() throws Exception { + TableFieldSchema timestampField = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setTimestampPrecision(Int64Value.newBuilder().setValue(12).build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = + 
BQTableSchemaToProtoDescriptor.convertBQTableFieldToProtoField(timestampField, 0, null); + assertEquals( + DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, fieldDescriptorProto.getType()); + } + + @Test + void timestampField_unexpectedPrecision() throws Exception { + TableFieldSchema timestampField = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setTimestampPrecision(Int64Value.newBuilder().setValue(13).build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = + BQTableSchemaToProtoDescriptor.convertBQTableFieldToProtoField(timestampField, 0, null); + assertEquals( + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, fieldDescriptorProto.getType()); + + TableFieldSchema timestampField1 = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setTimestampPrecision(Int64Value.newBuilder().setValue(7).build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto1 = + BQTableSchemaToProtoDescriptor.convertBQTableFieldToProtoField(timestampField1, 0, null); + assertEquals( + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, fieldDescriptorProto1.getType()); + + TableFieldSchema timestampField2 = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setTimestampPrecision(Int64Value.newBuilder().setValue(-1).build()) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto2 = + BQTableSchemaToProtoDescriptor.convertBQTableFieldToProtoField(timestampField2, 0, null); + assertEquals( + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, fieldDescriptorProto2.getType()); } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java 
b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java index 02b422fc83..8ffcc57bab 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryReadClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryReadClientTest.java index 6778e170f9..f049717a43 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryReadClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryReadClientTest.java @@ -15,6 +15,10 @@ */ package com.google.cloud.bigquery.storage.v1; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + import com.google.api.gax.core.NoCredentialsProvider; import com.google.api.gax.grpc.GaxGrpcProperties; import com.google.api.gax.grpc.GrpcStatusCode; @@ -42,14 +46,17 @@ import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -public class BigQueryReadClientTest { +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import 
org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; + +@Execution(ExecutionMode.SAME_THREAD) +class BigQueryReadClientTest { private static MockBigQueryRead mockBigQueryRead; private static MockServiceHelper serviceHelper; private BigQueryReadClient client; @@ -57,8 +64,8 @@ public class BigQueryReadClientTest { private int retryCount; private Code lastRetryStatusCode; - @BeforeClass - public static void startStaticServer() { + @BeforeAll + static void startStaticServer() { mockBigQueryRead = new MockBigQueryRead(); serviceHelper = new MockServiceHelper( @@ -66,13 +73,13 @@ public static void startStaticServer() { serviceHelper.start(); } - @AfterClass - public static void stopServer() { + @AfterAll + static void stopServer() { serviceHelper.stop(); } - @Before - public void setUp() throws IOException { + @BeforeEach + void setUp() throws IOException { serviceHelper.reset(); channelProvider = serviceHelper.createChannelProvider(); retryCount = 0; @@ -95,14 +102,15 @@ public void onRetryAttempt(Status prevStatus, Metadata prevMetadata) { client = BigQueryReadClient.create(settings); } - @After - public void tearDown() throws Exception { + @AfterEach + void tearDown() throws Exception { client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); } @Test @SuppressWarnings("all") - public void createReadSessionTest() { + void createReadSessionTest() { String name = "name3373707"; String table = "table110115790"; ReadSession expectedResponse = ReadSession.newBuilder().setName(name).setTable(table).build(); @@ -113,16 +121,19 @@ public void createReadSessionTest() { int maxStreamCount = 940837515; ReadSession actualResponse = client.createReadSession(parent, readSession, maxStreamCount); - Assert.assertEquals(expectedResponse, actualResponse); + assertEquals(expectedResponse, actualResponse); List actualRequests = mockBigQueryRead.getRequests(); 
- Assert.assertEquals(1, actualRequests.size()); + assertEquals(1, actualRequests.size()); CreateReadSessionRequest actualRequest = (CreateReadSessionRequest) actualRequests.get(0); - Assert.assertEquals(parent, actualRequest.getParent()); - Assert.assertEquals(readSession, actualRequest.getReadSession()); - Assert.assertEquals(maxStreamCount, actualRequest.getMaxStreamCount()); - Assert.assertTrue( + assertEquals(parent, actualRequest.getParent()); + + assertEquals(readSession, actualRequest.getReadSession()); + + assertEquals(maxStreamCount, actualRequest.getMaxStreamCount()); + + assertTrue( channelProvider.isHeaderSent( ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), GaxGrpcProperties.getDefaultApiClientHeaderPattern())); @@ -130,25 +141,22 @@ public void createReadSessionTest() { @Test @SuppressWarnings("all") - public void createReadSessionExceptionTest() throws Exception { + void createReadSessionExceptionTest() throws Exception { StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); mockBigQueryRead.addException(exception); - try { - String parent = "parent-995424086"; - ReadSession readSession = ReadSession.newBuilder().build(); - int maxStreamCount = 940837515; + String parent = "parent-995424086"; + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; - client.createReadSession(parent, readSession, maxStreamCount); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception - } + assertThrows( + InvalidArgumentException.class, + () -> client.createReadSession(parent, readSession, maxStreamCount)); } @Test @SuppressWarnings("all") - public void readRowsTest() throws Exception { + void readRowsTest() throws Exception { long rowCount = 1340416618L; ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); mockBigQueryRead.addResponse(expectedResponse); @@ -160,16 +168,16 @@ public void 
readRowsTest() throws Exception { callable.serverStreamingCall(request, responseObserver); List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); - Assert.assertEquals(expectedResponse, actualResponses.get(0)); + assertEquals(1, actualResponses.size()); + assertEquals(expectedResponse, actualResponses.get(0)); - Assert.assertEquals(retryCount, 0); - Assert.assertEquals(lastRetryStatusCode, Code.OK); + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); } @Test @SuppressWarnings("all") - public void readRowsExceptionTest() throws Exception { + void readRowsExceptionTest() throws Exception { StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); mockBigQueryRead.addException(exception); ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); @@ -179,22 +187,18 @@ public void readRowsExceptionTest() throws Exception { ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); - try { - List actualResponses = responseObserver.future().get(); - Assert.fail("No exception thrown"); - } catch (ExecutionException e) { - Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); - InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); - Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); - } - - Assert.assertEquals(retryCount, 0); - Assert.assertEquals(lastRetryStatusCode, Code.OK); + ExecutionException e = + assertThrows(ExecutionException.class, () -> responseObserver.future().get()); + assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); + assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); } @Test 
@SuppressWarnings("all") - public void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException { + void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException { ApiException exception = new InternalException( new StatusRuntimeException( @@ -213,15 +217,15 @@ public void readRowsRetryingEOSExceptionTest() throws ExecutionException, Interr ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); + assertEquals(1, actualResponses.size()); - Assert.assertEquals(retryCount, 1); - Assert.assertEquals(lastRetryStatusCode, Code.INTERNAL); + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.INTERNAL); } @Test @SuppressWarnings("all") - public void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException { + void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException { ApiException exception = new InternalException( new StatusRuntimeException( @@ -240,15 +244,15 @@ public void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, Inte ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); + assertEquals(1, actualResponses.size()); - Assert.assertEquals(retryCount, 1); - Assert.assertEquals(lastRetryStatusCode, Code.INTERNAL); + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.INTERNAL); } @Test @SuppressWarnings("all") - public void readRowsNoRetryForResourceExhaustedWithoutRetryInfo() + void readRowsNoRetryForResourceExhaustedWithoutRetryInfo() throws ExecutionException, InterruptedException { ApiException exception = new ResourceExhaustedException( @@ -267,23 +271,19 @@ public 
void readRowsNoRetryForResourceExhaustedWithoutRetryInfo() ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); - try { - List actualResponses = responseObserver.future().get(); - Assert.fail("No exception thrown"); - } catch (ExecutionException e) { - Assert.assertTrue(e.getCause() instanceof ResourceExhaustedException); - ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause(); - Assert.assertEquals( - StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode()); - } - - Assert.assertEquals(retryCount, 0); - Assert.assertEquals(lastRetryStatusCode, Code.OK); + ExecutionException e = + assertThrows(ExecutionException.class, () -> responseObserver.future().get()); + assertTrue(e.getCause() instanceof ResourceExhaustedException); + ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause(); + assertEquals(StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode()); + + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); } @Test @SuppressWarnings("all") - public void readRowsNoRetryForResourceExhaustedWithRetryInfo() + void readRowsNoRetryForResourceExhaustedWithRetryInfo() throws ExecutionException, InterruptedException { RetryInfo retryInfo = RetryInfo.newBuilder() @@ -329,9 +329,9 @@ public RetryInfo parseBytes(byte[] serialized) { ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); + assertEquals(1, actualResponses.size()); - Assert.assertEquals(retryCount, 1); - Assert.assertEquals(lastRetryStatusCode, Code.RESOURCE_EXHAUSTED); + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.RESOURCE_EXHAUSTED); } } diff --git 
a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQuerySchemaUtilTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQuerySchemaUtilTest.java index 44823e55a4..77e0da1171 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQuerySchemaUtilTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQuerySchemaUtilTest.java @@ -15,20 +15,21 @@ */ package com.google.cloud.bigquery.storage.v1; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.google.cloud.bigquery.storage.test.SchemaTest.SupportedTypes; import com.google.cloud.bigquery.storage.test.SchemaTest.TestNestedFlexibleFieldName; import com.google.protobuf.Descriptors.Descriptor; import java.util.Arrays; import java.util.List; -import junit.framework.TestCase; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.jupiter.api.Test; -@RunWith(JUnit4.class) -public class BigQuerySchemaUtilTest extends TestCase { +class BigQuerySchemaUtilTest { @Test - public void testIsProtoCompatible() { + void testIsProtoCompatible() { List protoCompatibleNames = Arrays.asList("col_1", "name", "_0_"); List protoIncompatibleNames = Arrays.asList("0_col", "()", "列", "a-1"); protoCompatibleNames.stream() @@ -43,13 +44,15 @@ public void testIsProtoCompatible() { }); } - public void testGeneratePlaceholderFieldName() { + @Test + void testGeneratePlaceholderFieldName() { assertEquals("col_c3RyLeWIlw", BigQuerySchemaUtil.generatePlaceholderFieldName("str-列")); // Base64 url encodes "~/~/" to "fi9-Lw", we replaced - with _ to be proto compatible. 
assertEquals("col_fi9_Lw", BigQuerySchemaUtil.generatePlaceholderFieldName("~/~/")); } - public void testGetFieldName() { + @Test + void testGetFieldName() { // Test get name from annotations. Descriptor flexibleDescriptor = TestNestedFlexibleFieldName.getDescriptor(); assertEquals("str-列", BigQuerySchemaUtil.getFieldName(flexibleDescriptor.getFields().get(0))); @@ -57,7 +60,7 @@ public void testGetFieldName() { "nested-列", BigQuerySchemaUtil.getFieldName(flexibleDescriptor.getFields().get(1))); // Test get name without annotations. - Descriptor descriptor = TestNestedFlexibleFieldName.getDescriptor(); + Descriptor descriptor = SupportedTypes.getDescriptor(); assertEquals("int32_value", BigQuerySchemaUtil.getFieldName(descriptor.getFields().get(0))); assertEquals("int64_value", BigQuerySchemaUtil.getFieldName(descriptor.getFields().get(1))); } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClientTest.java index 64de69fc19..005b8b0ab0 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteClientTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPoolTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPoolTest.java index 1916df6f3f..51fea1232b 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPoolTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerPoolTest.java @@ -16,7 +16,7 @@ package com.google.cloud.bigquery.storage.v1; import static com.google.common.truth.Truth.assertThat; -import static org.junit.Assert.assertThrows; +import static org.junit.jupiter.api.Assertions.assertThrows; import com.google.api.core.ApiFuture; import com.google.api.gax.batching.FlowController; @@ -45,13 +45,13 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; -@RunWith(JUnit4.class) -public class ConnectionWorkerPoolTest { +@Execution(ExecutionMode.SAME_THREAD) +class ConnectionWorkerPoolTest { private FakeBigQueryWrite testBigQueryWrite; private FakeScheduledExecutorService fakeExecutor; @@ -73,8 +73,8 @@ public class ConnectionWorkerPoolTest { .setMaxRetryDelay(org.threeten.bp.Duration.ofMinutes(MAX_RETRY_DELAY_MINUTES)) .build(); - @Before - public void setUp() throws Exception { + @BeforeEach + void setUp() throws Exception { testBigQueryWrite = new FakeBigQueryWrite(); serviceHelper = new MockServiceHelper( @@ -92,7 +92,7 @@ public void setUp() throws Exception { } @Test - public void testSingleTableConnection_noOverwhelmedConnection() throws Exception { + void 
testSingleTableConnection_noOverwhelmedConnection() throws Exception { // Set the max requests count to a large value so we will not scaling up. testSendRequestsToMultiTable( /* requestToSend= */ 100, @@ -103,7 +103,7 @@ public void testSingleTableConnection_noOverwhelmedConnection() throws Exception } @Test - public void testSingleTableConnections_overwhelmed() throws Exception { + void testSingleTableConnections_overwhelmed() throws Exception { // A connection will be considered overwhelmed when the requests count reach 5 (max 10). testSendRequestsToMultiTable( /* requestToSend= */ 100, @@ -114,7 +114,7 @@ public void testSingleTableConnections_overwhelmed() throws Exception { } @Test - public void testMultiTableConnection_noOverwhelmedConnection() throws Exception { + void testMultiTableConnection_noOverwhelmedConnection() throws Exception { // Set the max requests count to a large value so we will not scaling up. // All tables will share the two connections (2 becasue we set the min connections to be 2). testSendRequestsToMultiTable( @@ -126,7 +126,7 @@ public void testMultiTableConnection_noOverwhelmedConnection() throws Exception } @Test - public void testMultiTableConnections_overwhelmed_reachingMaximum() throws Exception { + void testMultiTableConnections_overwhelmed_reachingMaximum() throws Exception { // A connection will be considered overwhelmed when the requests count reach 5 (max 10). testSendRequestsToMultiTable( /* requestToSend= */ 100, @@ -137,7 +137,7 @@ public void testMultiTableConnections_overwhelmed_reachingMaximum() throws Excep } @Test - public void testMultiTableConnections_overwhelmed_overTotalLimit() throws Exception { + void testMultiTableConnections_overwhelmed_overTotalLimit() throws Exception { // A connection will be considered overwhelmed when the requests count reach 5 (max 10). 
testSendRequestsToMultiTable( /* requestToSend= */ 200, @@ -148,7 +148,7 @@ public void testMultiTableConnections_overwhelmed_overTotalLimit() throws Except } @Test - public void testMultiTableConnections_overwhelmed_notReachingMaximum() throws Exception { + void testMultiTableConnections_overwhelmed_notReachingMaximum() throws Exception { // A connection will be considered overwhelmed when the requests count reach 5 (max 10). testSendRequestsToMultiTable( /* requestToSend= */ 20, @@ -221,7 +221,7 @@ private void testSendRequestsToMultiTable( } @Test - public void testMultiStreamClosed_multiplexingEnabled() throws Exception { + void testMultiStreamClosed_multiplexingEnabled() throws Exception { ConnectionWorkerPool.setOptions( Settings.builder().setMaxConnectionsPerRegion(10).setMinConnectionsPerRegion(5).build()); ConnectionWorkerPool connectionWorkerPool = @@ -271,7 +271,7 @@ public void testMultiStreamClosed_multiplexingEnabled() throws Exception { } @Test - public void testMultiStreamAppend_appendWhileClosing() throws Exception { + void testMultiStreamAppend_appendWhileClosing() throws Exception { ConnectionWorkerPool.setOptions( Settings.builder().setMaxConnectionsPerRegion(10).setMinConnectionsPerRegion(5).build()); ConnectionWorkerPool connectionWorkerPool = @@ -334,7 +334,7 @@ public void testMultiStreamAppend_appendWhileClosing() throws Exception { } @Test - public void testCloseWhileAppending_noDeadlockHappen() throws Exception { + void testCloseWhileAppending_noDeadlockHappen() throws Exception { ConnectionWorkerPool.setOptions( Settings.builder().setMaxConnectionsPerRegion(10).setMinConnectionsPerRegion(5).build()); ConnectionWorkerPool connectionWorkerPool = @@ -400,7 +400,7 @@ public FakeBigQueryWriteImpl.Response get() { } @Test - public void testAppendWithRetry() throws Exception { + void testAppendWithRetry() throws Exception { ConnectionWorkerPool connectionWorkerPool = createConnectionWorkerPool( /* maxRequests= */ 1500, /* maxBytes= */ 100000, 
java.time.Duration.ofSeconds(5)); @@ -432,7 +432,7 @@ public void testAppendWithRetry() throws Exception { } @Test - public void testToTableName() { + void testToTableName() { assertThat(ConnectionWorkerPool.toTableName("projects/p/datasets/d/tables/t/streams/s")) .isEqualTo("projects/p/datasets/d/tables/t"); @@ -442,8 +442,7 @@ public void testToTableName() { } @Test - public void testCloseExternalClient() - throws IOException, InterruptedException, ExecutionException { + void testCloseExternalClient() throws IOException, InterruptedException, ExecutionException { StreamWriter.clearConnectionPool(); // Try append 100 requests. long appendCount = 100L; diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerTest.java index 1761247111..25b4a3f7e1 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ConnectionWorkerTest.java @@ -17,9 +17,9 @@ import static com.google.common.truth.Truth.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import com.google.api.core.ApiFuture; import com.google.api.gax.batching.FlowController; @@ -47,6 +47,7 @@ import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.logging.Logger; import org.apache.arrow.memory.RootAllocator; import org.apache.arrow.vector.VarCharVector; @@ -60,13 +61,14 @@ import 
org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.FieldType; import org.apache.arrow.vector.types.pojo.Schema; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -public class ConnectionWorkerTest { +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; + +@Execution(ExecutionMode.SAME_THREAD) +class ConnectionWorkerTest { private static final Logger log = Logger.getLogger(StreamWriter.class.getName()); private static final String TEST_STREAM_1 = "projects/p1/datasets/d1/tables/t1/streams/s1"; private static final String TEST_STREAM_2 = "projects/p2/datasets/d2/tables/t2/streams/s2"; @@ -84,8 +86,8 @@ public class ConnectionWorkerTest { private static MockServiceHelper serviceHelper; private BigQueryWriteClient client; - @Before - public void setUp() throws Exception { + @BeforeEach + void setUp() throws Exception { testBigQueryWrite = new FakeBigQueryWrite(); ConnectionWorker.setMaxInflightQueueWaitTime(300000); ConnectionWorker.setMaxInflightRequestWaitTime(Duration.ofMinutes(10)); @@ -103,8 +105,16 @@ public void setUp() throws Exception { .build()); } + @AfterEach + void cleanUp() throws InterruptedException { + serviceHelper.stop(); + + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); + } + @Test - public void testMultiplexedAppendSuccess_NonNullTraceId() throws Exception { + void testMultiplexedAppendSuccess_NonNullTraceId() throws Exception { testMultiplexedIngestion( /* sw1TraceId= */ "header_1:trailer_1", /* sw2TraceId= */ "header_2:trailer_2", @@ -113,7 +123,7 @@ public void testMultiplexedAppendSuccess_NonNullTraceId() throws Exception { } @Test - public void testMultiplexedAppendSuccess_EmptyTraceId() throws Exception { + void 
testMultiplexedAppendSuccess_EmptyTraceId() throws Exception { testMultiplexedIngestion( /* sw1TraceId= */ "header_1:trailer_1", /* sw2TraceId= */ "", @@ -242,10 +252,10 @@ private void testMultiplexedIngestion( } @Test - public void testMultiplexedAppendSuccess_MixEmptyAndNonEmptyTraceId() throws Exception {} + void testMultiplexedAppendSuccess_MixEmptyAndNonEmptyTraceId() throws Exception {} @Test - public void testAppendInSameStream_switchSchema() throws Exception { + void testAppendInSameStream_switchSchema() throws Exception { try (ConnectionWorker connectionWorker = createMultiplexedConnectionWorker()) { long appendCount = 20; for (long i = 0; i < appendCount; i++) { @@ -367,7 +377,7 @@ public void testAppendInSameStream_switchSchema() throws Exception { } @Test - public void testAppendInSameStreamSwitchArrowSchema() throws Exception { + void testAppendInSameStreamSwitchArrowSchema() throws Exception { try (ConnectionWorker connectionWorker = createMultiplexedConnectionWorker()) { long appendCount = 60; for (long i = 0; i < appendCount; i++) { @@ -485,7 +495,7 @@ public void testAppendInSameStreamSwitchArrowSchema() throws Exception { } @Test - public void testAppendButInflightQueueFull() throws Exception { + void testAppendButInflightQueueFull() throws Exception { ProtoSchema schema1 = createProtoSchema("foo"); StreamWriter sw1 = StreamWriter.newBuilder(TEST_STREAM_1, client) @@ -545,7 +555,7 @@ public void testAppendButInflightQueueFull() throws Exception { } @Test - public void testThrowExceptionWhileWithinAppendLoop() throws Exception { + void testThrowExceptionWhileWithinAppendLoop() throws Exception { ProtoSchema schema1 = createProtoSchema("foo"); StreamWriter sw1 = StreamWriter.newBuilder(TEST_STREAM_1, client) @@ -617,7 +627,7 @@ public void testThrowExceptionWhileWithinAppendLoop() throws Exception { } @Test - public void testLocationMismatch() throws Exception { + void testLocationMismatch() throws Exception { ProtoSchema schema1 = 
createProtoSchema("foo"); StreamWriter sw1 = StreamWriter.newBuilder(TEST_STREAM_1, client) @@ -656,7 +666,7 @@ public void testLocationMismatch() throws Exception { } @Test - public void testStreamNameMismatch() throws Exception { + void testStreamNameMismatch() throws Exception { ProtoSchema schema1 = createProtoSchema("foo"); StreamWriter sw1 = StreamWriter.newBuilder(TEST_STREAM_1, client).setWriterSchema(schema1).build(); @@ -693,7 +703,7 @@ public void testStreamNameMismatch() throws Exception { } @Test - public void testExponentialBackoff() throws Exception { + void testExponentialBackoff() throws Exception { assertThat(ConnectionWorker.calculateSleepTimeMilli(0)).isEqualTo(50); assertThat(ConnectionWorker.calculateSleepTimeMilli(5)).isEqualTo(1600); assertThat(ConnectionWorker.calculateSleepTimeMilli(100)).isEqualTo(60000); @@ -842,7 +852,7 @@ private com.google.cloud.bigquery.storage.v1.ArrowRecordBatch createArrowRecordB } @Test - public void testLoadCompare_compareLoad() { + void testLoadCompare_compareLoad() { // In flight bytes bucket is split as per 1024 requests per bucket. // When in flight bytes is in lower bucket, even destination count is higher and request count // is higher, the load is still smaller. @@ -862,7 +872,7 @@ public void testLoadCompare_compareLoad() { } @Test - public void testLoadIsOverWhelmed() { + void testLoadIsOverWhelmed() { // Only in flight request is considered in current overwhelmed calculation. 
Load load1 = ConnectionWorker.Load.create(60, 10, 100, 90, 100); assertThat(load1.isOverwhelmed()).isTrue(); @@ -872,7 +882,7 @@ public void testLoadIsOverWhelmed() { } @Test - public void testThrowExceptionWhileWithinAppendLoop_MaxWaitTimeExceed() throws Exception { + void testThrowExceptionWhileWithinAppendLoop_MaxWaitTimeExceed() throws Exception { ProtoSchema schema1 = createProtoSchema("foo"); ConnectionWorker.setMaxInflightRequestWaitTime(Duration.ofSeconds(1)); StreamWriter sw1 = @@ -941,16 +951,16 @@ public void testThrowExceptionWhileWithinAppendLoop_MaxWaitTimeExceed() throws E connectionWorker.close(); long timeDiff = System.currentTimeMillis() - startCloseTime; assertTrue( + timeDiff <= (appendCount * durationSleep.toMillis()), "timeDiff: " + timeDiff + " is more than total durationSleep: " - + (appendCount * durationSleep.toMillis()), - timeDiff <= (appendCount * durationSleep.toMillis())); + + (appendCount * durationSleep.toMillis())); assertTrue(connectionWorker.isUserClosed()); } @Test - public void testLongTimeIdleWontFail() throws Exception { + void testLongTimeIdleWontFail() throws Exception { ProtoSchema schema1 = createProtoSchema("foo"); ConnectionWorker.setMaxInflightRequestWaitTime(Duration.ofSeconds(1)); StreamWriter sw1 = @@ -1025,7 +1035,7 @@ private void exerciseOpenTelemetryAttributesWithStreamNames(String streamName, S } @Test - public void testOpenTelemetryAttributesWithStreamNames() throws Exception { + void testOpenTelemetryAttributesWithStreamNames() throws Exception { exerciseOpenTelemetryAttributesWithStreamNames( "projects/my_project/datasets/my_dataset/tables/my_table/streams/my_stream", "projects/my_project/datasets/my_dataset/tables/my_table"); @@ -1068,7 +1078,7 @@ void exerciseOpenTelemetryAttributesWithTraceId( } @Test - public void testOpenTelemetryAttributesWithTraceId() throws Exception { + void testOpenTelemetryAttributesWithTraceId() throws Exception { exerciseOpenTelemetryAttributesWithTraceId(null, null, null, 
null); exerciseOpenTelemetryAttributesWithTraceId("a:b:c", null, null, null); exerciseOpenTelemetryAttributesWithTraceId( @@ -1098,7 +1108,7 @@ public void testOpenTelemetryAttributesWithTraceId() throws Exception { } @Test - public void testDoubleDisconnectWithShorterRetryDuration() throws Exception { + void testDoubleDisconnectWithShorterRetryDuration() throws Exception { // simulate server disconnect due to idle stream testBigQueryWrite.setFailedStatus( Status.ABORTED.withDescription( @@ -1141,7 +1151,7 @@ public void testDoubleDisconnectWithShorterRetryDuration() throws Exception { } @Test - public void testLocationName() throws Exception { + void testLocationName() throws Exception { assertEquals( "projects/p1/locations/us", ConnectionWorker.getRoutingHeader(TEST_STREAM_1, "us")); } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWrite.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWrite.java index 15cda4029d..c11ed82d37 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWrite.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWrite.java @@ -43,7 +43,7 @@ public List getRequests() { return new LinkedList(serviceImpl.getCapturedRequests()); } - public void waitForResponseScheduled() throws InterruptedException { + void waitForResponseScheduled() throws InterruptedException { serviceImpl.waitForResponseScheduled(); } @@ -72,7 +72,7 @@ public void addResponse(AbstractMessage response) { * Add a response supplier to end of list. This supplier can be used to simulate retries or other * forms of behavior. 
*/ - public void addResponse(Supplier response) { + void addResponse(Supplier response) { serviceImpl.addResponse(response); } @@ -81,7 +81,7 @@ public void addException(Exception exception) { serviceImpl.addConnectionError(exception); } - public void addStatusException(com.google.rpc.Status status) { + void addStatusException(com.google.rpc.Status status) { serviceImpl.addException(status); } @@ -95,19 +95,19 @@ public void reset() { serviceImpl.reset(); } - public void setResponseSleep(Duration sleep) { + void setResponseSleep(Duration sleep) { serviceImpl.setResponseSleep(sleep); } - public void setCloseEveryNAppends(long closeAfter) { + void setCloseEveryNAppends(long closeAfter) { serviceImpl.setCloseEveryNAppends(closeAfter); } - public void setTimesToClose(long numberTimesToClose) { + void setTimesToClose(long numberTimesToClose) { serviceImpl.setTimesToClose(numberTimesToClose); } - public void setCloseForeverAfter(long closeForeverAfter) { + void setCloseForeverAfter(long closeForeverAfter) { serviceImpl.setCloseForeverAfter(closeForeverAfter); } @@ -115,19 +115,19 @@ public long getConnectionCount() { return serviceImpl.getConnectionCount(); } - public void setExecutor(ScheduledExecutorService executor) { + void setExecutor(ScheduledExecutorService executor) { serviceImpl.setExecutor(executor); } - public void setFailedStatus(Status failedStatus) { + void setFailedStatus(Status failedStatus) { serviceImpl.setFailedStatus(failedStatus); } - public void setReturnErrorDuringExclusiveStreamRetry(boolean retryOnError) { + void setReturnErrorDuringExclusiveStreamRetry(boolean retryOnError) { serviceImpl.setReturnErrorDuringExclusiveStreamRetry(retryOnError); } - public void setVerifyOffset(boolean verifyOffset) { + void setVerifyOffset(boolean verifyOffset) { serviceImpl.setVerifyOffset(verifyOffset); } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWriteImpl.java 
b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWriteImpl.java index 60938becec..d8cbd758b0 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWriteImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeBigQueryWriteImpl.java @@ -147,7 +147,7 @@ public void flushRows( } } - public void waitForResponseScheduled() throws InterruptedException { + void waitForResponseScheduled() throws InterruptedException { responseSemaphore.acquire(); } @@ -156,7 +156,7 @@ public long getConnectionCount() { return connectionCount; } - public void setFailedStatus(Status failedStatus) { + void setFailedStatus(Status failedStatus) { this.failedStatus = failedStatus; } @@ -304,7 +304,7 @@ public FakeBigQueryWriteImpl setResponseSleep(Duration responseSleep) { * Add a response to end of list. Response can be either an record, or an exception. All repsones * must be set up before any rows are appended. */ - public void addResponse(AppendRowsResponse appendRowsResponse) { + void addResponse(AppendRowsResponse appendRowsResponse) { responses.add(() -> new Response(appendRowsResponse)); } @@ -312,7 +312,7 @@ public void addResponse(AppendRowsResponse appendRowsResponse) { * Add a response supplier to end of list. This supplier can be used to simulate retries or other * forms of behavior. */ - public void addResponse(Supplier response) { + void addResponse(Supplier response) { responses.add(response); } @@ -335,7 +335,7 @@ public FakeBigQueryWriteImpl addConnectionError(Throwable error) { * Returns the given status, instead of a valid response. This should be treated as an exception * on the other side. This will not stop processing. 
*/ - public void addException(com.google.rpc.Status status) { + void addException(com.google.rpc.Status status) { responses.add(() -> new Response(AppendRowsResponse.newBuilder().setError(status).build())); } @@ -343,15 +343,15 @@ public void addException(com.google.rpc.Status status) { * Will abort the connection instead of return a valid response. This should NOT be used to return * a retriable error (as that will cause an infinite loop.) */ - public void addNonRetriableError(com.google.rpc.Status status) { + void addNonRetriableError(com.google.rpc.Status status) { responses.add(() -> new Response(AppendRowsResponse.newBuilder().setError(status).build())); } - public void setVerifyOffset(boolean verifyOffset) { + void setVerifyOffset(boolean verifyOffset) { this.verifyOffset = verifyOffset; } - public void setReturnErrorDuringExclusiveStreamRetry(boolean retryOnError) { + void setReturnErrorDuringExclusiveStreamRetry(boolean retryOnError) { this.returnErrorDuringExclusiveStreamRetry = retryOnError; } @@ -363,7 +363,7 @@ public List getCapturedWriteRequests() { return new ArrayList(writeRequests); } - public void reset() { + void reset() { requests.clear(); responses.clear(); } @@ -377,7 +377,7 @@ public void reset() { * closeAfter should be large enough to give the client some opportunity to receive some of the * messages. **/ - public void setCloseEveryNAppends(long closeAfter) { + void setCloseEveryNAppends(long closeAfter) { this.closeAfter = closeAfter; } @@ -390,13 +390,13 @@ public void setCloseEveryNAppends(long closeAfter) { * infinite loop. Therefore set the times to close to 1. This will send the two records, force * an abort an retry, and then reprocess the records to completion. **/ - public void setTimesToClose(long numberTimesToClose) { + void setTimesToClose(long numberTimesToClose) { this.numberTimesToClose = numberTimesToClose; } /* The connection will forever return failure after numberTimesToClose. 
This option shouldn't * be used together with setCloseEveryNAppends and setTimesToClose*/ - public void setCloseForeverAfter(long closeForeverAfter) { + void setCloseForeverAfter(long closeForeverAfter) { this.closeForeverAfter = closeForeverAfter; } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeClock.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeClock.java index 6a83c820c7..526932442f 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeClock.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeClock.java @@ -25,7 +25,7 @@ public class FakeClock implements ApiClock { private final AtomicLong millis = new AtomicLong(); // Advances the clock value by {@code time} in {@code timeUnit}. - public void advance(long time, TimeUnit timeUnit) { + void advance(long time, TimeUnit timeUnit) { millis.addAndGet(timeUnit.toMillis(time)); } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeScheduledExecutorService.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeScheduledExecutorService.java index 68bdfbf6d1..772cf0186d 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeScheduledExecutorService.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/FakeScheduledExecutorService.java @@ -91,7 +91,7 @@ public ScheduledFuture scheduleWithFixedDelay( * This will advance the reference time of the executor and execute (in the same thread) any * outstanding callable which execution time has passed. 
*/ - public void advanceTime(Duration toAdvance) { + void advanceTime(Duration toAdvance) { LOG.info( "Advance to time to:" + Instant.ofEpochMilli(clock.millisTime() + toAdvance.toMillis()).toString()); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java index 2889b6b4f0..bf8202201f 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java @@ -15,9 +15,11 @@ */ package com.google.cloud.bigquery.storage.v1; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import com.google.api.client.util.Sleeper; import com.google.api.core.ApiFuture; @@ -58,18 +60,17 @@ import java.util.Map; import java.util.UUID; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import org.json.JSONArray; import org.json.JSONObject; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.function.ThrowingRunnable; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; -@RunWith(JUnit4.class) -public class JsonStreamWriterTest { 
+@Execution(ExecutionMode.SAME_THREAD) +class JsonStreamWriterTest { private static final int NUMERIC_SCALE = 9; private static final String TEST_STREAM = "projects/p/datasets/d/tables/t/streams/_default"; @@ -132,10 +133,10 @@ public class JsonStreamWriterTest { .setName("test_string") .build(); - public JsonStreamWriterTest() throws DescriptorValidationException {} + JsonStreamWriterTest() throws DescriptorValidationException {} - @Before - public void setUp() throws Exception { + @BeforeEach + void setUp() throws Exception { testBigQueryWrite = new FakeBigQueryWrite(); serviceHelper = new MockServiceHelper(UUID.randomUUID().toString(), Arrays.asList(testBigQueryWrite)); @@ -155,9 +156,12 @@ public void setUp() throws Exception { StreamWriter.cleanUp(); } - @After - public void tearDown() throws Exception { + @AfterEach + void tearDown() throws Exception { serviceHelper.stop(); + + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); } private JsonStreamWriter.Builder getTestJsonStreamWriterBuilder( @@ -176,34 +180,30 @@ private JsonStreamWriter.Builder getTestJsonStreamWriterBuilder(String testStrea } @Test - public void testTwoParamNewBuilder_nullSchema() { - try { - getTestJsonStreamWriterBuilder(null, TABLE_SCHEMA); - Assert.fail("expected NullPointerException"); - } catch (NullPointerException e) { - assertEquals(e.getMessage(), "StreamOrTableName is null."); - } + void testTwoParamNewBuilder_nullSchema() { + NullPointerException e = + assertThrows( + NullPointerException.class, () -> getTestJsonStreamWriterBuilder(null, TABLE_SCHEMA)); + assertEquals(e.getMessage(), "StreamOrTableName is null."); } @Test - public void testTwoParamNewBuilder_nullStream() { - try { - getTestJsonStreamWriterBuilder(TEST_STREAM, null); - Assert.fail("expected NullPointerException"); - } catch (NullPointerException e) { - assertEquals(e.getMessage(), "TableSchema is null."); - } + void testTwoParamNewBuilder_nullStream() { + NullPointerException e = + assertThrows( + 
NullPointerException.class, () -> getTestJsonStreamWriterBuilder(TEST_STREAM, null)); + assertEquals(e.getMessage(), "TableSchema is null."); } @Test - public void testTwoParamNewBuilder() + void testTwoParamNewBuilder() throws DescriptorValidationException, IOException, InterruptedException { JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build(); assertEquals(TEST_STREAM, writer.getStreamName()); } @Test - public void testConstructWriterUsingDefaultStreamName() + void testConstructWriterUsingDefaultStreamName() throws DescriptorValidationException, IOException, InterruptedException { JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_TABLE_DEFAULT, TABLE_SCHEMA).build(); @@ -211,7 +211,7 @@ public void testConstructWriterUsingDefaultStreamName() } @Test - public void testSingleAppendSimpleJson() throws Exception { + void testSingleAppendSimpleJson() throws Exception { FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); JSONObject foo = new JSONObject(); foo.put("foo", "allen"); @@ -254,7 +254,7 @@ public void testSingleAppendSimpleJson() throws Exception { } @Test - public void testSingleAppendSimpleGson() throws Exception { + void testSingleAppendSimpleGson() throws Exception { FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); JsonObject foo = new JsonObject(); foo.addProperty("foo", "allen"); @@ -297,7 +297,7 @@ public void testSingleAppendSimpleGson() throws Exception { } @Test - public void testFlexibleColumnAppend() throws Exception { + void testFlexibleColumnAppend() throws Exception { TableFieldSchema field = TableFieldSchema.newBuilder() .setType(TableFieldSchema.Type.STRING) @@ -344,7 +344,7 @@ public void testFlexibleColumnAppend() throws Exception { } @Test - public void testSpecialTypeAppend() throws Exception { + void testSpecialTypeAppend() throws Exception { TableFieldSchema field = TableFieldSchema.newBuilder() .setName("time") @@ -394,7 +394,7 @@ public void 
testSpecialTypeAppend() throws Exception { } @Test - public void testRepeatedByteStringAppend() throws Exception { + void testRepeatedByteStringAppend() throws Exception { TableFieldSchema NON_REPEATED_A = TableFieldSchema.newBuilder() .setType(TableFieldSchema.Type.NUMERIC) @@ -525,7 +525,7 @@ public void testRepeatedByteStringAppend() throws Exception { } @Test - public void testSingleAppendMultipleSimpleJson() throws Exception { + void testSingleAppendMultipleSimpleJson() throws Exception { FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); JSONObject foo = new JSONObject(); foo.put("foo", "allen"); @@ -579,7 +579,7 @@ public void testSingleAppendMultipleSimpleJson() throws Exception { } @Test - public void testMultipleAppendSimpleJson() throws Exception { + void testMultipleAppendSimpleJson() throws Exception { FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); JSONObject foo = new JSONObject(); foo.put("foo", "allen"); @@ -634,7 +634,7 @@ public void testMultipleAppendSimpleJson() throws Exception { } @Test - public void testAppendOutOfRangeException() throws Exception { + void testAppendOutOfRangeException() throws Exception { try (JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { testBigQueryWrite.addResponse( @@ -646,17 +646,13 @@ public void testAppendOutOfRangeException() throws Exception { JSONArray jsonArr = new JSONArray(); jsonArr.put(foo); ApiFuture appendFuture = writer.append(jsonArr); - try { - appendFuture.get(); - Assert.fail("expected ExecutionException"); - } catch (ExecutionException ex) { - assertEquals(ex.getCause().getMessage(), "OUT_OF_RANGE: "); - } + ExecutionException ex = assertThrows(ExecutionException.class, () -> appendFuture.get()); + assertEquals(ex.getCause().getMessage(), "OUT_OF_RANGE: "); } } @Test - public void testCreateDefaultStream_withNoSchemaPassedIn() throws Exception { + void testCreateDefaultStream_withNoSchemaPassedIn() throws 
Exception { TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).addFields(1, TEST_STRING).build(); testBigQueryWrite.addResponse( @@ -677,7 +673,7 @@ public void testCreateDefaultStream_withNoSchemaPassedIn() throws Exception { } @Test - public void testCreateDefaultStream_withNoClientPassedIn() throws Exception { + void testCreateDefaultStream_withNoClientPassedIn() throws Exception { TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).addFields(1, TEST_STRING).build(); testBigQueryWrite.addResponse( @@ -707,7 +703,7 @@ public void testCreateDefaultStream_withNoClientPassedIn() throws Exception { } @Test - public void testCreateDefaultStreamWrongLocation() { + void testCreateDefaultStreamWrongLocation() { TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).addFields(1, TEST_STRING).build(); testBigQueryWrite.addResponse( @@ -719,21 +715,18 @@ public void testCreateDefaultStreamWrongLocation() { IllegalArgumentException ex = assertThrows( IllegalArgumentException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - JsonStreamWriter.newBuilder(TEST_TABLE, client) - .setChannelProvider(channelProvider) - .setCredentialsProvider(NoCredentialsProvider.create()) - .setLocation("bb") - .build(); - } + () -> { + JsonStreamWriter.newBuilder(TEST_TABLE, client) + .setChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .setLocation("bb") + .build(); }); assertEquals("Specified location bb does not match the system value aa", ex.getMessage()); } @Test - public void testSimpleSchemaUpdate() throws Exception { + void testSimpleSchemaUpdate() throws Exception { testBigQueryWrite.addResponse( WriteStream.newBuilder() .setName(TEST_STREAM) @@ -832,7 +825,7 @@ public void testSimpleSchemaUpdate() throws Exception { } @Test - public void testSimpleSchemaUpdate_skipRefreshWriterIfSchemaProvided() throws Exception { + void 
testSimpleSchemaUpdate_skipRefreshWriterIfSchemaProvided() throws Exception { testBigQueryWrite.addResponse( WriteStream.newBuilder() .setName(TEST_STREAM) @@ -914,7 +907,7 @@ public void testSimpleSchemaUpdate_skipRefreshWriterIfSchemaProvided() throws Ex } @Test - public void testSimpleSchemaUpdate_withInterpretationMap() throws Exception { + void testSimpleSchemaUpdate_withInterpretationMap() throws Exception { testBigQueryWrite.addResponse( WriteStream.newBuilder() .setName(TEST_STREAM) @@ -1003,7 +996,7 @@ public void testSimpleSchemaUpdate_withInterpretationMap() throws Exception { } @Test - public void testWithoutIgnoreUnknownFieldsUpdateImmeidateSuccess() throws Exception { + void testWithoutIgnoreUnknownFieldsUpdateImmeidateSuccess() throws Exception { TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).build(); TableSchema updatedSchema = TableSchema.newBuilder() @@ -1041,7 +1034,7 @@ public void testWithoutIgnoreUnknownFieldsUpdateImmeidateSuccess() throws Except } @Test - public void testWithoutIgnoreUnknownFieldsUpdateSecondSuccess() throws Exception { + void testWithoutIgnoreUnknownFieldsUpdateSecondSuccess() throws Exception { TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).build(); TableSchema updatedSchema = TableSchema.newBuilder() @@ -1077,7 +1070,7 @@ public void testWithoutIgnoreUnknownFieldsUpdateSecondSuccess() throws Exception } @Test - public void testSchemaUpdateInMultiplexing_singleConnection() throws Exception { + void testSchemaUpdateInMultiplexing_singleConnection() throws Exception { // Set min connection count to be 1 to force sharing connection. 
ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); @@ -1174,7 +1167,7 @@ public void testSchemaUpdateInMultiplexing_singleConnection() throws Exception { } @Test - public void testMissingValueInterpretation_multiplexingCase() throws Exception { + void testMissingValueInterpretation_multiplexingCase() throws Exception { // Set min connection count to be 1 to force sharing connection. ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); @@ -1247,7 +1240,7 @@ public void testMissingValueInterpretation_multiplexingCase() throws Exception { } @Test - public void testSchemaUpdateInMultiplexing_multipleWriterForSameStreamName() throws Exception { + void testSchemaUpdateInMultiplexing_multipleWriterForSameStreamName() throws Exception { // Set min connection count to be 1 to force sharing connection. ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); @@ -1337,7 +1330,7 @@ public void testSchemaUpdateInMultiplexing_multipleWriterForSameStreamName() thr } @Test - public void testSchemaUpdateInMultiplexing_IgnoreUpdateIfTimeStampNewer() throws Exception { + void testSchemaUpdateInMultiplexing_IgnoreUpdateIfTimeStampNewer() throws Exception { // Set min connection count to be 1 to force sharing connection. ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); @@ -1428,7 +1421,7 @@ public void testSchemaUpdateInMultiplexing_IgnoreUpdateIfTimeStampNewer() throws } @Test - public void testWithoutIgnoreUnknownFieldsUpdateFail() throws Exception { + void testWithoutIgnoreUnknownFieldsUpdateFail() throws Exception { TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).build(); // GetWriteStream is called once but failed to update to the right schema. 
testBigQueryWrite.addResponse( @@ -1444,20 +1437,17 @@ public void testWithoutIgnoreUnknownFieldsUpdateFail() throws Exception { JSONArray jsonArr = new JSONArray(); jsonArr.put(foo); jsonArr.put(bar); - try { - ApiFuture appendFuture = writer.append(jsonArr); - Assert.fail("expected ExecutionException"); - } catch (AppendSerializationError ex) { - assertEquals( - "The source object has fields unknown to BigQuery: root.test_unknown.", - ex.getRowIndexToErrorMessage().get(1)); - assertEquals(TEST_STREAM, ex.getStreamName()); - } + AppendSerializationError ex = + assertThrows(AppendSerializationError.class, () -> writer.append(jsonArr)); + assertEquals( + "The source object has fields unknown to BigQuery: root.test_unknown.", + ex.getRowIndexToErrorMessage().get(1)); + assertEquals(TEST_STREAM, ex.getStreamName()); } } @Test - public void testWithIgnoreUnknownFields() throws Exception { + void testWithIgnoreUnknownFields() throws Exception { TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).build(); try (JsonStreamWriter writer = JsonStreamWriter.newBuilder(TEST_STREAM, tableSchema) @@ -1480,7 +1470,7 @@ public void testWithIgnoreUnknownFields() throws Exception { } @Test - public void testFlowControlSetting() throws Exception { + void testFlowControlSetting() throws Exception { TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).build(); try (JsonStreamWriter writer = JsonStreamWriter.newBuilder(TEST_STREAM, tableSchema) @@ -1500,11 +1490,8 @@ public void testFlowControlSetting() throws Exception { StatusRuntimeException ex = assertThrows( StatusRuntimeException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - writer.append(jsonArr); - } + () -> { + writer.append(jsonArr); }); assertEquals(ex.getStatus().getCode(), Status.RESOURCE_EXHAUSTED.getCode()); assertTrue( @@ -1519,7 +1506,7 @@ public void run() throws Throwable { // This is to test the new addition didn't break previous 
settings, i.e., sets the inflight limit // without limit beahvior. @Test - public void testFlowControlSettingNoLimitBehavior() throws Exception { + void testFlowControlSettingNoLimitBehavior() throws Exception { TableSchema tableSchema = TableSchema.newBuilder().addFields(0, TEST_INT).build(); try (JsonStreamWriter writer = JsonStreamWriter.newBuilder(TEST_STREAM, tableSchema) @@ -1540,7 +1527,7 @@ public void testFlowControlSettingNoLimitBehavior() throws Exception { } @Test - public void testMultipleAppendSerializationErrors() + void testMultipleAppendSerializationErrors() throws DescriptorValidationException, IOException, InterruptedException { FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); JSONObject foo = new JSONObject(); @@ -1563,23 +1550,20 @@ public void testMultipleAppendSerializationErrors() try (JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { - try { - ApiFuture appendFuture = writer.append(jsonArr); - Assert.fail("expected AppendSerializationError"); - } catch (AppendSerializationError appendSerializationError) { - Map rowIndexToErrorMessage = - appendSerializationError.getRowIndexToErrorMessage(); - assertEquals( - ImmutableMap.of( - 0, "The source object has fields unknown to BigQuery: root.not_foo.", - 2, "The source object has fields unknown to BigQuery: root.not_bar."), - rowIndexToErrorMessage); - } + AppendSerializationError appendSerializationError = + assertThrows(AppendSerializationError.class, () -> writer.append(jsonArr)); + Map rowIndexToErrorMessage = + appendSerializationError.getRowIndexToErrorMessage(); + assertEquals( + ImmutableMap.of( + 0, "The source object has fields unknown to BigQuery: root.not_foo.", + 2, "The source object has fields unknown to BigQuery: root.not_bar."), + rowIndexToErrorMessage); } } @Test - public void testBadStringToNumericRowError() + void testBadStringToNumericRowError() throws DescriptorValidationException, IOException, 
InterruptedException { TableSchema TABLE_SCHEMA = TableSchema.newBuilder() @@ -1601,38 +1585,34 @@ public void testBadStringToNumericRowError() try (JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { - try { - ApiFuture appendFuture = writer.append(jsonArr); - Assert.fail("expected AppendSerializationError"); - } catch (AppendSerializationError appendSerializationError) { - Map rowIndexToErrorMessage = - appendSerializationError.getRowIndexToErrorMessage(); - assertEquals(1, rowIndexToErrorMessage.size()); - assertTrue( - rowIndexToErrorMessage - .get(0) - .startsWith("Field root.test_field_type failed to convert to NUMERIC. Error:")); - } + AppendSerializationError appendSerializationError = + assertThrows(AppendSerializationError.class, () -> writer.append(jsonArr)); + Map rowIndexToErrorMessage = + appendSerializationError.getRowIndexToErrorMessage(); + assertEquals(1, rowIndexToErrorMessage.size()); + assertTrue( + rowIndexToErrorMessage + .get(0) + .startsWith("Field root.test_field_type failed to convert to NUMERIC. 
Error:")); } } @Test - public void testWriterId() - throws DescriptorValidationException, IOException, InterruptedException { + void testWriterId() throws DescriptorValidationException, IOException, InterruptedException { JsonStreamWriter writer1 = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build(); - Assert.assertFalse(writer1.getWriterId().isEmpty()); + assertFalse(writer1.getWriterId().isEmpty()); JsonStreamWriter writer2 = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build(); - Assert.assertFalse(writer2.getWriterId().isEmpty()); - Assert.assertNotEquals(writer1.getWriterId(), writer2.getWriterId()); + assertFalse(writer2.getWriterId().isEmpty()); + assertNotEquals(writer1.getWriterId(), writer2.getWriterId()); } @Test - public void testIsDone() throws DescriptorValidationException, IOException, InterruptedException { + void testIsDone() throws DescriptorValidationException, IOException, InterruptedException { JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build(); - Assert.assertFalse(writer.isClosed()); + assertFalse(writer.isClosed()); writer.close(); - Assert.assertTrue(writer.isClosed()); - Assert.assertTrue(writer.isUserClosed()); + assertTrue(writer.isClosed()); + assertTrue(writer.isUserClosed()); } private AppendRowsResponse createAppendResponse(long offset) { @@ -1643,7 +1623,7 @@ private AppendRowsResponse createAppendResponse(long offset) { } @Test - public void testAppendWithMissingValueMap() throws Exception { + void testAppendWithMissingValueMap() throws Exception { TableFieldSchema field = TableFieldSchema.newBuilder() .setType(TableFieldSchema.Type.STRING) @@ -1685,7 +1665,7 @@ public void testAppendWithMissingValueMap() throws Exception { } @Test - public void testWrongCompressionType() throws Exception { + void testWrongCompressionType() throws Exception { IllegalArgumentException ex = assertThrows( IllegalArgumentException.class, diff --git 
a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java index 2e622e8966..bc54472159 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java @@ -15,8 +15,9 @@ */ package com.google.cloud.bigquery.storage.v1; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import com.google.cloud.bigquery.storage.test.JsonTest.*; import com.google.cloud.bigquery.storage.test.SchemaTest.*; @@ -26,8 +27,11 @@ import com.google.protobuf.ByteString; import com.google.protobuf.Descriptors.Descriptor; import com.google.protobuf.DynamicMessage; +import com.google.protobuf.Int64Value; import com.google.protobuf.Message; +import com.google.protobuf.Timestamp; import java.math.BigDecimal; +import java.time.Instant; import java.time.LocalTime; import java.util.ArrayList; import java.util.Collection; @@ -38,13 +42,9 @@ import java.util.logging.Logger; import org.json.JSONArray; import org.json.JSONObject; -import org.junit.Assert; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.jupiter.api.Test; -@RunWith(JUnit4.class) -public class JsonToProtoMessageTest { +class JsonToProtoMessageTest { private static final Logger LOG = Logger.getLogger(JsonToProtoMessageTest.class.getName()); private static ImmutableMap AllTypesToDebugMessageTest = new ImmutableMap.Builder() @@ -513,6 +513,20 @@ public class JsonToProtoMessageTest { .setMode(TableFieldSchema.Mode.REPEATED) 
.setName("test_json") .build(); + final TableFieldSchema TEST_TIMESTAMP_HIGHER_PRECISION = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .setName("test_timestamp_higher_precision") + .setTimestampPrecision(Int64Value.newBuilder().setValue(12).build()) + .build(); + private final TableFieldSchema TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_timestamp_higher_precision_repeated") + .setTimestampPrecision(Int64Value.newBuilder().setValue(12).build()) + .build(); private final TableSchema COMPLEX_TABLE_SCHEMA = TableSchema.newBuilder() .addFields(0, TEST_INT) @@ -546,10 +560,12 @@ public class JsonToProtoMessageTest { .addFields(28, TEST_BIGNUMERIC_DOUBLE) .addFields(29, TEST_INTERVAL) .addFields(30, TEST_JSON) + .addFields(31, TEST_TIMESTAMP_HIGHER_PRECISION) + .addFields(32, TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) .build(); @Test - public void testDifferentNameCasing() throws Exception { + void testDifferentNameCasing() throws Exception { TestInt64 expectedProto = TestInt64.newBuilder().setByte(1).setShort(1).setInt(1).setLong(1).build(); @@ -564,7 +580,7 @@ public void testDifferentNameCasing() throws Exception { } @Test - public void testBool() throws Exception { + void testBool() throws Exception { TestBool expectedProto = TestBool.newBuilder().setBool(true).setUppercase(true).setLowercase(false).build(); JSONObject json = new JSONObject(); @@ -577,7 +593,7 @@ public void testBool() throws Exception { } @Test - public void testInt64() throws Exception { + void testInt64() throws Exception { TestInt64 expectedProto = TestInt64.newBuilder().setByte(1).setShort(1).setInt(1).setLong(1).setString(1).build(); JSONObject json = new JSONObject(); @@ -592,7 +608,7 @@ public void testInt64() throws Exception { } @Test - public void testInt64Extended() throws Exception 
{ + void testInt64Extended() throws Exception { TestInt64 expectedProto = TestInt64.newBuilder().setByte(1).setShort(1).setInt(1).setLong(1).setString(1).build(); Map map = new HashMap(); @@ -608,7 +624,7 @@ public void testInt64Extended() throws Exception { } @Test - public void testInt64Repeated() throws Exception { + void testInt64Repeated() throws Exception { RepeatedInt64 expectedProto = RepeatedInt64.newBuilder() .addTestRepeated(1) @@ -632,7 +648,7 @@ public void testInt64Repeated() throws Exception { } @Test - public void testInt32() throws Exception { + void testInt32() throws Exception { TestInt32 expectedProto = TestInt32.newBuilder().setByte(1).setShort(1).setInt(1).setString(1).build(); JSONObject json = new JSONObject(); @@ -646,22 +662,21 @@ public void testInt32() throws Exception { } @Test - public void testInt32NotMatchInt64() throws Exception { + void testInt32NotMatchInt64() throws Exception { JSONObject json = new JSONObject(); json.put("byte", (byte) 1); json.put("short", (short) 1); json.put("int", 1L); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestInt32.getDescriptor(), json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("JSONObject does not have a int32 field at root.int.")); - } + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestInt32.getDescriptor(), json)); + assertTrue(e.getMessage().contains("JSONObject does not have a int32 field at root.int.")); } @Test - public void testDateTimeMismatch() throws Exception { + void testDateTimeMismatch() throws Exception { TableFieldSchema field = TableFieldSchema.newBuilder() .setName("datetime") @@ -671,15 +686,13 @@ public void testDateTimeMismatch() throws Exception { TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); JSONObject json = new JSONObject(); 
json.put("datetime", 1.0); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage( - TestDatetime.getDescriptor(), tableSchema, json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertTrue( - e.getMessage().contains("JSONObject does not have a int64 field at root.datetime.")); - } + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestDatetime.getDescriptor(), tableSchema, json)); + assertTrue(e.getMessage().contains("JSONObject does not have a int64 field at root.datetime.")); } private void dateTimeMatch_Internal(String jsonVal, Long expectedVal) throws Exception { @@ -700,7 +713,7 @@ private void dateTimeMatch_Internal(String jsonVal, Long expectedVal) throws Exc } @Test - public void testDateTimeMatch() throws Exception { + void testDateTimeMatch() throws Exception { dateTimeMatch_Internal("2021-09-27T20:51:10.752", 142258614586538368L); dateTimeMatch_Internal("2021-09-27t20:51:10.752", 142258614586538368L); dateTimeMatch_Internal("2021-09-27 20:51:10.752", 142258614586538368L); @@ -711,7 +724,7 @@ public void testDateTimeMatch() throws Exception { } @Test - public void testTimeMismatch() throws Exception { + void testTimeMismatch() throws Exception { TableFieldSchema field = TableFieldSchema.newBuilder() .setName("time") @@ -721,19 +734,17 @@ public void testTimeMismatch() throws Exception { TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); JSONObject json = new JSONObject(); json.put("time", new JSONArray(new Double[] {1.0})); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage( - TestTime.getDescriptor(), tableSchema, json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertTrue( - e.getMessage().contains("JSONObject does not have a int64 field at root.time[0].")); - } + IllegalArgumentException e = + assertThrows( 
+ IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestTime.getDescriptor(), tableSchema, json)); + assertTrue(e.getMessage().contains("JSONObject does not have a int64 field at root.time[0].")); } @Test - public void testMixedCaseFieldNames() throws Exception { + void testMixedCaseFieldNames() throws Exception { TableFieldSchema field = TableFieldSchema.newBuilder() .setName("fooBar") @@ -751,7 +762,7 @@ public void testMixedCaseFieldNames() throws Exception { } @Test - public void testDouble() throws Exception { + void testDouble() throws Exception { TestDouble expectedProto = TestDouble.newBuilder() .setDouble(1.2) @@ -776,7 +787,7 @@ public void testDouble() throws Exception { } @Test - public void testDoubleHighPrecision() throws Exception { + void testDoubleHighPrecision() throws Exception { TableSchema tableSchema = TableSchema.newBuilder() .addFields( @@ -800,7 +811,7 @@ public void testDoubleHighPrecision() throws Exception { } @Test - public void testDoubleHighPrecision_RepeatedField() throws Exception { + void testDoubleHighPrecision_RepeatedField() throws Exception { TableSchema tableSchema = TableSchema.newBuilder() .addFields( @@ -830,7 +841,7 @@ public void testDoubleHighPrecision_RepeatedField() throws Exception { } @Test - public void testTimestamp() throws Exception { + void testTimestamp() throws Exception { TableSchema tableSchema = TableSchema.newBuilder() .addFields(TableFieldSchema.newBuilder(TEST_TIMESTAMP).setName("test_string").build()) @@ -874,7 +885,77 @@ public void testTimestamp() throws Exception { } @Test - public void testTimestampRepeated() throws Exception { + void testTimestamp_higherPrecision() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_string") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + 
.setName("test_string_T_Z") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_long") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_int") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_float") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_offset") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_zero_offset") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_timezone") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION) + .setName("test_saformat") + .build()) + .build(); + + TestTimestampHigherPrecision expectedProto = + TestTimestampHigherPrecision.newBuilder() + .setTestString("1970-01-01T00:00:00.000010+00:00") + .setTestStringTZ("2022-03-28T18:47:59.010000+00:00") + .setTestLong("2023-06-28T20:28:05.000000+00:00") + .setTestInt("1970-01-01T00:02:33.480695+00:00") + .setTestFloat("1970-01-02T18:37:48.069500+00:00") + .setTestOffset("2022-04-05T05:06:11.000000+00:00") + .setTestZeroOffset("2022-03-28T18:47:59.010000+00:00") + .setTestTimezone("2022-04-05T16:06:11.000000+00:00") + .setTestSaformat("2018-08-19T12:11:00.000000+00:00") + .build(); + JSONObject json = new JSONObject(); + json.put("test_string", "1970-01-01 00:00:00.000010"); + json.put("test_string_T_Z", "2022-03-28T18:47:59.01Z"); + json.put("test_long", 1687984085000000L); + json.put("test_int", 153480695); + json.put("test_float", "1.534680695e11"); + json.put("test_offset", "2022-04-05T09:06:11+04:00"); + json.put("test_zero_offset", "2022-03-28T18:47:59.01+00:00"); + json.put("test_timezone", "2022-04-05 09:06:11 PST"); + json.put("test_saformat", "2018/08/19 12:11"); + DynamicMessage protoMsg = + 
JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestTimestampHigherPrecision.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testTimestampRepeated() throws Exception { TableSchema tableSchema = TableSchema.newBuilder() .addFields( @@ -944,7 +1025,78 @@ public void testTimestampRepeated() throws Exception { } @Test - public void testDate() throws Exception { + void testTimestampRepeated_higherPrecision() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_string_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_string_T_Z_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_long_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_int_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_float_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_offset_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_zero_offset_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_timezone_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_HIGHER_PRECISION_REPEATED) + .setName("test_saformat_repeated") + .build()) + .build(); + + TestRepeatedTimestampHigherPrecision expectedProto = + TestRepeatedTimestampHigherPrecision.newBuilder() + .addTestStringRepeated("1970-01-01T00:00:00.000010+00:00") + .addTestStringTZRepeated("2022-03-28T18:47:59.010000+00:00") + 
.addTestLongRepeated("2023-06-28T20:28:05.000000+00:00") + .addTestIntRepeated("1970-01-01T00:02:33.480695+00:00") + .addTestFloatRepeated("1970-01-02T18:37:48.069500+00:00") + .addTestOffsetRepeated("2022-04-05T05:06:11.000000+00:00") + .addTestZeroOffsetRepeated("2022-03-28T18:47:59.010000+00:00") + .addTestTimezoneRepeated("2022-04-05T16:06:11.000000+00:00") + .addTestSaformatRepeated("2018-08-19T12:11:00.000000+00:00") + .build(); + JSONObject json = new JSONObject(); + json.put("test_string_repeated", new JSONArray(new String[] {"1970-01-01 00:00:00.000010"})); + json.put("test_string_T_Z_repeated", new JSONArray(new String[] {"2022-03-28T18:47:59.01Z"})); + json.put("test_long_repeated", new JSONArray(new Long[] {1687984085000000L})); + json.put("test_int_repeated", new JSONArray(new Integer[] {153480695})); + json.put("test_float_repeated", new JSONArray(new String[] {"1.534680695e11"})); + json.put("test_offset_repeated", new JSONArray(new String[] {"2022-04-05T09:06:11+04:00"})); + json.put( + "test_zero_offset_repeated", new JSONArray(new String[] {"2022-03-28T18:47:59.01+00:00"})); + json.put("test_timezone_repeated", new JSONArray(new String[] {"2022-04-05 09:06:11 PST"})); + json.put("test_saformat_repeated", new JSONArray(new String[] {"2018/08/19 12:11"})); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestRepeatedTimestampHigherPrecision.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + void testDate() throws Exception { TableSchema tableSchema = TableSchema.newBuilder() .addFields(TableFieldSchema.newBuilder(TEST_DATE).setName("test_string").build()) @@ -961,7 +1113,7 @@ public void testDate() throws Exception { } @Test - public void testAllTypes() throws Exception { + void testAllTypes() throws Exception { for (Map.Entry entry : AllTypesToDebugMessageTest.entrySet()) { int success = 0; for (JSONObject json : simpleJSONObjects) { @@ -970,7 +1122,7 @@ public void 
testAllTypes() throws Exception { DynamicMessage protoMsg = JsonToProtoMessage.INSTANCE.convertToProtoMessage(entry.getKey(), json); LOG.info("Convert Success!"); - assertEquals(protoMsg, AllTypesToCorrectProto.get(entry.getKey())[success]); + assertEquals(AllTypesToCorrectProto.get(entry.getKey())[success], protoMsg); success += 1; } catch (IllegalArgumentException e) { assertTrue( @@ -982,20 +1134,20 @@ public void testAllTypes() throws Exception { } } if (entry.getKey() == DoubleType.getDescriptor()) { - assertEquals(entry.getKey().getFullName(), 3, success); + assertEquals(3, success, entry.getKey().getFullName()); } else if (entry.getKey() == Int64Type.getDescriptor() || entry.getKey() == BytesType.getDescriptor()) { - assertEquals(entry.getKey().getFullName(), 2, success); + assertEquals(2, success, entry.getKey().getFullName()); } else if (entry.getKey() == StringType.getDescriptor()) { - assertEquals(entry.getKey().getFullName(), 4, success); + assertEquals(4, success, entry.getKey().getFullName()); } else { - assertEquals(entry.getKey().getFullName(), 1, success); + assertEquals(1, success, entry.getKey().getFullName()); } } } @Test - public void testAllRepeatedTypesWithLimits() throws Exception { + void testAllRepeatedTypesWithLimits() throws Exception { for (Map.Entry entry : AllRepeatedTypesToDebugMessageTest.entrySet()) { int success = 0; for (JSONObject json : simpleJSONArrays) { @@ -1005,9 +1157,9 @@ public void testAllRepeatedTypesWithLimits() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(entry.getKey(), json); LOG.info("Convert Success!"); assertEquals( - protoMsg.toString(), + AllRepeatedTypesToCorrectProto.get(entry.getKey())[success], protoMsg, - AllRepeatedTypesToCorrectProto.get(entry.getKey())[success]); + protoMsg.toString()); success += 1; } catch (IllegalArgumentException e) { LOG.info(e.getMessage()); @@ -1022,19 +1174,19 @@ public void testAllRepeatedTypesWithLimits() throws Exception { } } if (entry.getKey() == 
RepeatedDouble.getDescriptor()) { - assertEquals(entry.getKey().getFullName(), 4, success); + assertEquals(4, success, entry.getKey().getFullName()); } else if (entry.getKey() == RepeatedInt64.getDescriptor()) { - assertEquals(entry.getKey().getFullName(), 2, success); + assertEquals(2, success, entry.getKey().getFullName()); } else if (entry.getKey() == RepeatedString.getDescriptor()) { - assertEquals(entry.getKey().getFullName(), 4, success); + assertEquals(4, success, entry.getKey().getFullName()); } else { - assertEquals(entry.getKey().getFullName(), 1, success); + assertEquals(1, success, entry.getKey().getFullName()); } } } @Test - public void testOptional() throws Exception { + void testOptional() throws Exception { TestInt64 expectedProto = TestInt64.newBuilder().setByte(1).build(); JSONObject json = new JSONObject(); json.put("byte", 1); @@ -1045,7 +1197,7 @@ public void testOptional() throws Exception { } @Test - public void testRepeatedIsOptional() throws Exception { + void testRepeatedIsOptional() throws Exception { TestRepeatedIsOptional expectedProto = TestRepeatedIsOptional.newBuilder().setRequiredDouble(1.1).build(); JSONObject json = new JSONObject(); @@ -1058,22 +1210,22 @@ public void testRepeatedIsOptional() throws Exception { } @Test - public void testRequired() throws Exception { + void testRequired() throws Exception { JSONObject json = new JSONObject(); json.put("optional_double", 1.1); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestRequired.getDescriptor(), json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertTrue( - e.getMessage() - .contains("JSONObject does not have the required field root.required_double.")); - } + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestRequired.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains("JSONObject does not have 
the required field root.required_double.")); } @Test - public void testRange() throws Exception { + void testRange() throws Exception { TableSchema tableSchema = TableSchema.newBuilder() .addFields( @@ -1195,7 +1347,7 @@ public void testRange() throws Exception { } @Test - public void testStructSimple() throws Exception { + void testStructSimple() throws Exception { structSimple("test", "test"); structSimple(true, "true"); structSimple(1, "1"); @@ -1217,26 +1369,26 @@ private void structSimple(Object value, String expected) throws Exception { } @Test - public void testStructSimpleFail() throws Exception { + void testStructSimpleFail() throws Exception { JSONObject stringType = new JSONObject(); stringType.put("test_field_type", new boolean[0]); JSONObject json = new JSONObject(); json.put("test_field_type", stringType); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage(MessageType.getDescriptor(), json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertTrue( - e.getMessage() - .contains( - "JSONObject does not have a string field at" - + " root.test_field_type.test_field_type.")); - } + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + MessageType.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains( + "JSONObject does not have a string field at" + + " root.test_field_type.test_field_type.")); } @Test - public void testStructComplex() throws Exception { + void testStructComplex() throws Exception { ComplexRoot expectedProto = ComplexRoot.newBuilder() .setTestInt(1) @@ -1305,6 +1457,7 @@ public void testStructComplex() throws Exception { BigDecimalByteStringEncoder.encodeToBigNumericByteString(new BigDecimal(5D))) .setTestInterval("0-0 0 0:0:0.000005") .addTestJson("{'a':'b'}") + .setTestTimestampHigherPrecision("2025-12-01 12:34:56.123456789123+00:00") .build(); JSONObject complex_lvl2 = new 
JSONObject(); complex_lvl2.put("test_int", 3); @@ -1370,6 +1523,7 @@ public void testStructComplex() throws Exception { json.put("test_bignumeric_double", 5D); json.put("test_interval", "0-0 0 0:0:0.000005"); json.put("test_json", new JSONArray(new String[] {"{'a':'b'}"})); + json.put("test_timestamp_higher_precision", "2025-12-01 12:34:56.123456789123+00:00"); DynamicMessage protoMsg = JsonToProtoMessage.INSTANCE.convertToProtoMessage( ComplexRoot.getDescriptor(), COMPLEX_TABLE_SCHEMA, json); @@ -1377,7 +1531,7 @@ public void testStructComplex() throws Exception { } @Test - public void testStructComplexFail() throws Exception { + void testStructComplexFail() throws Exception { JSONObject complex_lvl2 = new JSONObject(); complex_lvl2.put("test_int", 3); @@ -1395,34 +1549,34 @@ public void testStructComplexFail() throws Exception { json.put("complex_lvl1", complex_lvl1); json.put("complex_lvl2", complex_lvl2); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage(ComplexRoot.getDescriptor(), json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertTrue( - e.getMessage() - .contains("JSONObject does not have a int64 field at root.complex_lvl1.test_int.")); - } + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + ComplexRoot.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains("JSONObject does not have a int64 field at root.complex_lvl1.test_int.")); } @Test - public void testRepeatedWithMixedTypes() throws Exception { + void testRepeatedWithMixedTypes() throws Exception { JSONObject json = new JSONObject(); json.put("test_repeated", new JSONArray("[1.1, 2.2, true]")); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage(RepeatedDouble.getDescriptor(), json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertTrue( - e.getMessage() - 
.contains("JSONObject does not have a double field at root.test_repeated[2].")); - } + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + RepeatedDouble.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains("JSONObject does not have a double field at root.test_repeated[2].")); } @Test - public void testNestedRepeatedComplex() throws Exception { + void testNestedRepeatedComplex() throws Exception { NestedRepeated expectedProto = NestedRepeated.newBuilder() .addDouble(1.1) @@ -1461,7 +1615,7 @@ public void testNestedRepeatedComplex() throws Exception { } @Test - public void testNestedRepeatedComplexFail() throws Exception { + void testNestedRepeatedComplexFail() throws Exception { double[] doubleArr = {1.1, 2.2, 3.3, 4.4, 5.5}; Boolean[][] fakeStringArr = {new Boolean[0], new Boolean[0]}; int[] intArr = {1, 2, 3, 4, 5}; @@ -1473,21 +1627,21 @@ public void testNestedRepeatedComplexFail() throws Exception { jsonRepeatedString.put("test_repeated", new JSONArray(fakeStringArr)); json.put("repeated_string", jsonRepeatedString); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage(NestedRepeated.getDescriptor(), json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertTrue( - e.getMessage() - .contains( - "JSONObject does not have a string field at" - + " root.repeated_string.test_repeated[0].")); - } + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + NestedRepeated.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains( + "JSONObject does not have a string field at" + + " root.repeated_string.test_repeated[0].")); } @Test - public void testEmptySecondLevelObject() throws Exception { + void testEmptySecondLevelObject() throws Exception { ComplexLvl1 expectedProto = ComplexLvl1.newBuilder() 
.setTestInt(1) @@ -1504,24 +1658,24 @@ public void testEmptySecondLevelObject() throws Exception { } @Test - public void testAllowUnknownFieldsError() throws Exception { + void testAllowUnknownFieldsError() throws Exception { JSONObject json = new JSONObject(); json.put("test_repeated", new JSONArray(new int[] {1, 2, 3, 4, 5})); json.put("string", "hello"); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage(RepeatedInt64.getDescriptor(), json); - Assert.fail("Should fail"); - } catch (IllegalArgumentException e) { - assertTrue( - e.getMessage() - .contains("The source object has fields unknown to BigQuery: " + "root.string.")); - } + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + RepeatedInt64.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains("The source object has fields unknown to BigQuery: " + "root.string.")); } @Test - public void testEmptyProtoMessage() throws Exception { + void testEmptyProtoMessage() throws Exception { JSONObject json = new JSONObject(); json.put("test_repeated", new JSONArray(new int[0])); @@ -1531,61 +1685,57 @@ public void testEmptyProtoMessage() throws Exception { } @Test - public void testEmptyJSONObject() throws Exception { + void testEmptyJSONObject() throws Exception { JSONObject json = new JSONObject(); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage(Int64Type.getDescriptor(), json); - Assert.fail("Should fail"); - } catch (IllegalStateException e) { - assertEquals("JSONObject is empty.", e.getMessage()); - } + IllegalStateException e = + assertThrows( + IllegalStateException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage(Int64Type.getDescriptor(), json)); + assertEquals("JSONObject is empty.", e.getMessage()); } @Test - public void testNullJson() throws Exception { - try { - DynamicMessage protoMsg = - 
JsonToProtoMessage.INSTANCE.convertToProtoMessage(Int64Type.getDescriptor(), null); - Assert.fail("Should fail"); - } catch (NullPointerException e) { - assertEquals("JSONObject is null.", e.getMessage()); - } + void testNullJson() throws Exception { + NullPointerException e = + assertThrows( + NullPointerException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage(Int64Type.getDescriptor(), null)); + assertEquals("JSONObject is null.", e.getMessage()); } @Test - public void testNullDescriptor() throws Exception { - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage(null, new JSONObject()); - Assert.fail("Should fail"); - } catch (NullPointerException e) { - assertEquals("Protobuf descriptor is null.", e.getMessage()); - } + void testNullDescriptor() throws Exception { + NullPointerException e = + assertThrows( + NullPointerException.class, + () -> JsonToProtoMessage.INSTANCE.convertToProtoMessage(null, new JSONObject())); + assertEquals("Protobuf descriptor is null.", e.getMessage()); } @Test - public void testAllowUnknownFieldsSecondLevel() throws Exception { + void testAllowUnknownFieldsSecondLevel() throws Exception { JSONObject complex_lvl2 = new JSONObject(); complex_lvl2.put("no_match", 1); JSONObject json = new JSONObject(); json.put("test_int", 1); json.put("complex_lvl2", complex_lvl2); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage(ComplexLvl1.getDescriptor(), json); - Assert.fail("Should fail"); - } catch (IllegalArgumentException e) { - assertTrue( - e.getMessage() - .contains( - "The source object has fields unknown to BigQuery: root.complex_lvl2.no_match.")); - } + IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + ComplexLvl1.getDescriptor(), json)); + assertTrue( + e.getMessage() + .contains( + "The source object has fields unknown to BigQuery: 
root.complex_lvl2.no_match.")); } @Test - public void testTopLevelMatchSecondLevelMismatch() throws Exception { + void testTopLevelMatchSecondLevelMismatch() throws Exception { ComplexLvl1 expectedProto = ComplexLvl1.newBuilder() .setTestInt(1) @@ -1602,7 +1752,7 @@ public void testTopLevelMatchSecondLevelMismatch() throws Exception { } @Test - public void testJsonNullValue() throws Exception { + void testJsonNullValue() throws Exception { TestInt64 expectedProto = TestInt64.newBuilder().setInt(1).build(); JSONObject json = new JSONObject(); json.put("long", JSONObject.NULL); @@ -1613,7 +1763,7 @@ public void testJsonNullValue() throws Exception { } @Test - public void testJsonAllFieldsNullValue() throws Exception { + void testJsonAllFieldsNullValue() throws Exception { TestInt64 expectedProto = TestInt64.newBuilder().build(); JSONObject json = new JSONObject(); json.put("long", JSONObject.NULL); @@ -1624,7 +1774,7 @@ public void testJsonAllFieldsNullValue() throws Exception { } @Test - public void testBadJsonFieldRepeated() throws Exception { + void testBadJsonFieldRepeated() throws Exception { TableSchema ts = TableSchema.newBuilder() .addFields( @@ -1638,19 +1788,18 @@ public void testBadJsonFieldRepeated() throws Exception { JSONObject json = new JSONObject(); json.put("test_repeated", new JSONArray(new String[] {"123", "blah"})); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage( - RepeatedBytes.getDescriptor(), ts, json); - Assert.fail("Should fail"); - } catch (RowIndexToErrorException ex) { - assertTrue(ex.rowIndexToErrorMessage.size() == 1); - assertTrue(ex.getMessage().contains("root.test_repeated failed to convert to NUMERIC.")); - } + RowIndexToErrorException ex = + assertThrows( + RowIndexToErrorException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + RepeatedBytes.getDescriptor(), ts, json)); + assertTrue(ex.rowIndexToErrorMessage.size() == 1); + 
assertTrue(ex.getMessage().contains("root.test_repeated failed to convert to NUMERIC.")); } @Test - public void testBadJsonFieldIntRepeated() throws Exception { + void testBadJsonFieldIntRepeated() throws Exception { TableSchema ts = TableSchema.newBuilder() .addFields( @@ -1664,18 +1813,17 @@ public void testBadJsonFieldIntRepeated() throws Exception { JSONObject json = new JSONObject(); json.put("test_repeated", new JSONArray(new String[] {"blah"})); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.INSTANCE.convertToProtoMessage( - RepeatedInt32.getDescriptor(), ts, json); - Assert.fail("Should fail"); - } catch (IllegalArgumentException ex) { - assertTrue(ex.getMessage().contains("Text 'blah' could not be parsed at index 0")); - } + IllegalArgumentException ex = + assertThrows( + IllegalArgumentException.class, + () -> + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + RepeatedInt32.getDescriptor(), ts, json)); + assertTrue(ex.getMessage().contains("Text 'blah' could not be parsed at index 0")); } @Test - public void testNullRepeatedField() throws Exception { + void testNullRepeatedField() throws Exception { TableSchema ts = TableSchema.newBuilder() .addFields( @@ -1711,7 +1859,7 @@ public void testNullRepeatedField() throws Exception { } @Test - public void testDoubleAndFloatToNumericConversion() { + void testDoubleAndFloatToNumericConversion() { TableSchema ts = TableSchema.newBuilder() .addFields( @@ -1738,7 +1886,7 @@ public void testDoubleAndFloatToNumericConversion() { } @Test - public void testDoubleAndFloatToNumericConversionWithJsonArray() { + void testDoubleAndFloatToNumericConversionWithJsonArray() { TableSchema ts = TableSchema.newBuilder() .addFields( @@ -1783,7 +1931,7 @@ public void testDoubleAndFloatToNumericConversionWithJsonArray() { } @Test - public void testBigDecimalToBigNumericConversion() { + void testBigDecimalToBigNumericConversion() { TableSchema ts = TableSchema.newBuilder() .addFields( @@ -1808,7 +1956,7 @@ public void 
testBigDecimalToBigNumericConversion() { } @Test - public void testDoubleAndFloatToRepeatedBigNumericConversion() { + void testDoubleAndFloatToRepeatedBigNumericConversion() { TableSchema ts = TableSchema.newBuilder() .addFields( @@ -1834,4 +1982,60 @@ public void testDoubleAndFloatToRepeatedBigNumericConversion() { JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestBignumeric.getDescriptor(), ts, json); assertEquals(expectedProto, protoMsg); } + + @Test + void testGetTimestampAsString() { + // String case must be in ISO8601 format + assertEquals( + "2025-10-01T12:34:56.123456+00:00", + JsonToProtoMessage.getTimestampAsString("2025-10-01 12:34:56.123456+00:00")); + assertEquals( + "2025-10-01T12:34:56.123456789123+00:00", + JsonToProtoMessage.getTimestampAsString("2025-10-01T12:34:56.123456789123+00:00")); + + // Numeric case must be micros from epoch + assertEquals("1970-01-01T00:00:00.000001+00:00", JsonToProtoMessage.getTimestampAsString(1L)); + assertEquals("1969-12-31T23:59:59.999999+00:00", JsonToProtoMessage.getTimestampAsString(-1L)); + assertEquals( + "1970-01-01T00:00:00.001234+00:00", JsonToProtoMessage.getTimestampAsString("1234")); + assertEquals("1970-01-01T00:00:00.000010+00:00", JsonToProtoMessage.getTimestampAsString(10.4)); + assertEquals( + "1969-12-31T23:59:59.999000+00:00", JsonToProtoMessage.getTimestampAsString("-1000.4")); + + // Protobuf timestamp format is converted to ISO8601 string + assertEquals( + "1970-01-02T10:17:36.000123456+00:00", + JsonToProtoMessage.getTimestampAsString( + Timestamp.newBuilder().setSeconds(123456).setNanos(123456).build())); + assertEquals( + "1969-12-30T13:42:23.999876544+00:00", + JsonToProtoMessage.getTimestampAsString( + Timestamp.newBuilder().setSeconds(-123456).setNanos(-123456).build())); + + assertThrows( + IllegalArgumentException.class, + () -> JsonToProtoMessage.getTimestampAsString("2025-10-01")); + assertThrows( + IllegalArgumentException.class, () -> 
JsonToProtoMessage.getTimestampAsString("abc")); + assertThrows( + IllegalArgumentException.class, + () -> JsonToProtoMessage.getTimestampAsString(Timestamp.newBuilder())); + assertThrows( + IllegalArgumentException.class, + () -> JsonToProtoMessage.getTimestampAsString(new Object())); + assertThrows( + IllegalArgumentException.class, () -> JsonToProtoMessage.getTimestampAsString(null)); + } + + @Test + void testFromEpochMicros() { + // The `+` is added if there are more than 4 digits for years + assertEquals( + "+294247-01-10T04:00:54.775807Z", + JsonToProtoMessage.fromEpochMicros(Long.MAX_VALUE).toString()); + assertEquals( + "-290308-12-21T19:59:05.224192Z", + JsonToProtoMessage.fromEpochMicros(Long.MIN_VALUE).toString()); + assertEquals(Instant.EPOCH.toString(), JsonToProtoMessage.fromEpochMicros(0L).toString()); + } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryRead.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryRead.java index 68d6ae166f..731000e6a3 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryRead.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryRead.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryReadImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryReadImpl.java index b74d19c656..420ce9c075 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryReadImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryReadImpl.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWrite.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWrite.java index 6b3a984ef1..03737b87ef 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWrite.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWrite.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java index 3a826f8933..23723fb921 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaConverterTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaConverterTest.java index 9e3eafbb82..7cb5001f22 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaConverterTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaConverterTest.java @@ -15,18 +15,21 @@ */ package com.google.cloud.bigquery.storage.v1; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + import com.google.api.gax.rpc.InvalidArgumentException; import com.google.cloud.bigquery.storage.test.Test.*; import com.google.protobuf.DescriptorProtos.FileDescriptorProto; import com.google.protobuf.Descriptors; -import org.junit.*; +import org.junit.jupiter.api.Test; public class ProtoSchemaConverterTest { @Test - public void convertSimple() { + void convertSimple() { AllSupportedTypes testProto = AllSupportedTypes.newBuilder().setStringValue("abc").build(); ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - Assert.assertEquals( + assertEquals( 
"name: \"com_google_cloud_bigquery_storage_test_AllSupportedTypes\"\n" + "field {\n" + " name: \"int32_value\"\n" @@ -101,10 +104,10 @@ public void convertSimple() { } @Test - public void convertNested() { + void convertNested() { ComplicateType testProto = ComplicateType.newBuilder().build(); ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - Assert.assertEquals( + assertEquals( "name: \"com_google_cloud_bigquery_storage_test_ComplicateType\"\n" + "field {\n" + " name: \"nested_repeated_type\"\n" @@ -143,34 +146,32 @@ public void convertNested() { } @Test - public void convertRecursive() { - try { - RecursiveType testProto = RecursiveType.newBuilder().build(); - ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - Assert.assertEquals( - "Recursive type is not supported:com.google.cloud.bigquery.storage.test.RecursiveType", - e.getMessage()); - } + void convertRecursive() { + RecursiveType testProto = RecursiveType.newBuilder().build(); + InvalidArgumentException e = + assertThrows( + InvalidArgumentException.class, + () -> ProtoSchemaConverter.convert(testProto.getDescriptorForType())); + assertEquals( + "Recursive type is not supported:com.google.cloud.bigquery.storage.test.RecursiveType", + e.getMessage()); } @Test - public void convertRecursiveTopMessage() { - try { - RecursiveTypeTopMessage testProto = RecursiveTypeTopMessage.newBuilder().build(); - ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - Assert.assertEquals( - "Recursive type is not" - + " supported:com.google.cloud.bigquery.storage.test.RecursiveTypeTopMessage", - e.getMessage()); - } + void convertRecursiveTopMessage() { + RecursiveTypeTopMessage testProto = RecursiveTypeTopMessage.newBuilder().build(); + 
InvalidArgumentException e = + assertThrows( + InvalidArgumentException.class, + () -> ProtoSchemaConverter.convert(testProto.getDescriptorForType())); + assertEquals( + "Recursive type is not" + + " supported:com.google.cloud.bigquery.storage.test.RecursiveTypeTopMessage", + e.getMessage()); } @Test - public void convertDuplicateType() { + void convertDuplicateType() throws Descriptors.DescriptorValidationException { DuplicateType testProto = DuplicateType.newBuilder().build(); ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); @@ -179,15 +180,11 @@ public void convertDuplicateType() { .setName("foo.proto") .addMessageType(protoSchema.getProtoDescriptor()) .build(); - try { - Descriptors.FileDescriptor fs = - Descriptors.FileDescriptor.buildFrom( - fileDescriptorProto, new Descriptors.FileDescriptor[0]); - Descriptors.Descriptor type = - fs.findMessageTypeByName(protoSchema.getProtoDescriptor().getName()); - Assert.assertEquals(4, type.getFields().size()); - } catch (Descriptors.DescriptorValidationException ex) { - Assert.fail("Got unexpected exception: " + ex.getMessage()); - } + Descriptors.FileDescriptor fs = + Descriptors.FileDescriptor.buildFrom( + fileDescriptorProto, new Descriptors.FileDescriptor[0]); + Descriptors.Descriptor type = + fs.findMessageTypeByName(protoSchema.getProtoDescriptor().getName()); + assertEquals(4, type.getFields().size()); } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/RequestProfilerTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/RequestProfilerTest.java index ce699941a3..33460c190e 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/RequestProfilerTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/RequestProfilerTest.java @@ -15,7 +15,7 @@ */ package com.google.cloud.bigquery.storage.v1; -import static 
org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import com.google.cloud.bigquery.storage.v1.RequestProfiler.OperationName; import com.google.common.collect.ImmutableSet; @@ -28,32 +28,32 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -public class RequestProfilerTest { +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; + +@Execution(ExecutionMode.SAME_THREAD) +class RequestProfilerTest { private static final Logger log = Logger.getLogger(RequestProfiler.class.getName()); private RequestProfiler.RequestProfilerHook profilerHook = new RequestProfiler.RequestProfilerHook(true); - @Before - public void setup() { + @BeforeEach + void setup() { RequestProfiler.disableAndResetProfiler(); profilerHook.enableProfiler(); } - @After - public void close() { + @AfterEach + void close() { RequestProfiler.disableAndResetProfiler(); } @Test - public void testNormalCase() throws Exception { + void testNormalCase() throws Exception { profilerHook.startOperation(OperationName.TOTAL_LATENCY, "request_1"); profilerHook.startOperation(OperationName.JSON_TO_PROTO_CONVERSION, "request_1"); profilerHook.endOperation(OperationName.JSON_TO_PROTO_CONVERSION, "request_1"); @@ -89,7 +89,7 @@ public void testNormalCase() throws Exception { } @Test - public void mixFinishedAndUnfinishedRequest() throws Exception { + void mixFinishedAndUnfinishedRequest() throws Exception { // Start request 1. 
profilerHook.startOperation(OperationName.TOTAL_LATENCY, "request_1"); profilerHook.startOperation(OperationName.JSON_TO_PROTO_CONVERSION, "request_1"); @@ -121,7 +121,7 @@ public void mixFinishedAndUnfinishedRequest() throws Exception { } @Test - public void concurrentProfilingTest_1000ReqsRunTogether() throws Exception { + void concurrentProfilingTest_1000ReqsRunTogether() throws Exception { int totalRequest = 1000; ListeningExecutorService threadPool = MoreExecutors.listeningDecorator( @@ -167,10 +167,13 @@ public void concurrentProfilingTest_1000ReqsRunTogether() throws Exception { assertTrue(reportText.contains("Request uuid: request_30 with total time")); assertTrue(reportText.contains("Request uuid: request_25 with total time")); assertTrue(reportText.contains("Request uuid: request_20 with total time")); + + threadPool.shutdown(); + threadPool.awaitTermination(10, TimeUnit.SECONDS); } @Test - public void concurrentProfilingTest_RunWhileFlushing() throws Exception { + void concurrentProfilingTest_RunWhileFlushing() throws Exception { int totalRequest = 1000; ListeningExecutorService threadPool = MoreExecutors.listeningDecorator( @@ -211,5 +214,8 @@ public void concurrentProfilingTest_RunWhileFlushing() throws Exception { } String reportText = profilerHook.flushAndGenerateReportText(); assertTrue(reportText.contains("0 requests finished during")); + + threadPool.shutdown(); + threadPool.awaitTermination(10, TimeUnit.SECONDS); } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/StreamWriterTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/StreamWriterTest.java index 200babf5a8..614ec75333 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/StreamWriterTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/StreamWriterTest.java @@ -17,10 +17,11 @@ import static 
com.google.common.truth.Truth.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import com.google.api.client.util.Sleeper; import com.google.api.core.ApiFuture; @@ -91,16 +92,14 @@ import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.FieldType; import org.apache.arrow.vector.types.pojo.Schema; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.function.ThrowingRunnable; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; -@RunWith(JUnit4.class) -public class StreamWriterTest { +@Execution(ExecutionMode.SAME_THREAD) +class StreamWriterTest { private static final Logger log = Logger.getLogger(StreamWriterTest.class.getName()); private static final String TEST_STREAM_1 = "projects/p/datasets/d1/tables/t1/streams/_default"; @@ -152,7 +151,7 @@ public class StreamWriterTest { UPDATED_TABLE_SCHEMA)); private static final BufferAllocator allocator = new RootAllocator(); - public StreamWriterTest() throws DescriptorValidationException { + StreamWriterTest() throws DescriptorValidationException { Field foo = new Field("foo", FieldType.nullable(new ArrowType.Utf8()), null); ARROW_SCHEMA = new Schema(Arrays.asList(foo)); final 
ByteArrayOutputStream out = new ByteArrayOutputStream(); @@ -166,8 +165,8 @@ public StreamWriterTest() throws DescriptorValidationException { ArrowSchema.newBuilder().setSerializedSchema(ByteString.copyFrom(bytes)).build(); } - @Before - public void setUp() throws Exception { + @BeforeEach + void setUp() throws Exception { testBigQueryWrite = new FakeBigQueryWrite(); StreamWriter.setMaxRequestCallbackWaitTime(java.time.Duration.ofSeconds(10000)); ConnectionWorker.setMaxInflightQueueWaitTime(300000); @@ -186,12 +185,14 @@ public void setUp() throws Exception { StreamWriter.cleanUp(); } - @After - public void tearDown() throws Exception { + @AfterEach + void tearDown() throws Exception { log.info("tearDown called"); - client.close(); serviceHelper.stop(); StreamWriter.cleanUp(); + + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); } private StreamWriter getMultiplexingTestStreamWriter() throws IOException { @@ -366,15 +367,12 @@ private static T assertFutureException( Class expectedThrowable, final Future future) { return assertThrows( expectedThrowable, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - try { - future.get(); - } catch (ExecutionException ex) { - // Future wraps exception with ExecutionException. So unwrapper it here. - throw ex.getCause(); - } + () -> { + try { + future.get(); + } catch (ExecutionException ex) { + // Future wraps exception with ExecutionException. So unwrap it here.
+ throw ex.getCause(); } }); } @@ -415,7 +413,7 @@ private void verifyAppendRequests(long appendCount) { } } - public void testBuildBigQueryWriteClientInWriter() throws Exception { + void testBuildBigQueryWriteClientInWriter() throws Exception { StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1) .setCredentialsProvider(NoCredentialsProvider.create()) @@ -463,7 +461,7 @@ public FakeBigQueryWriteImpl.Response get() { } @Test - public void testAppendSuccess() throws Exception { + void testAppendSuccess() throws Exception { StreamWriter writer = getTestStreamWriter(); long appendCount = 100; @@ -486,7 +484,7 @@ public void testAppendSuccess() throws Exception { } @Test - public void testAppendSuccess_RetryDirectlyInCallback() throws Exception { + void testAppendSuccess_RetryDirectlyInCallback() throws Exception { // Set a relatively small in flight request counts. StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1, client) @@ -544,12 +542,12 @@ public void onFailure(Throwable throwable) { } @Test - public void testUpdatedSchemaFetch_multiplexing() throws Exception { + void testUpdatedSchemaFetch_multiplexing() throws Exception { testUpdatedSchemaFetch(/* enableMultiplexing= */ true); } @Test - public void testUpdatedSchemaFetch_nonMultiplexing() throws Exception { + void testUpdatedSchemaFetch_nonMultiplexing() throws Exception { testUpdatedSchemaFetch(/* enableMultiplexing= */ false); } @@ -590,66 +588,51 @@ private void testUpdatedSchemaFetch(boolean enableMultiplexing) } @Test - public void testNoSchema() throws Exception { + void testNoSchema() throws Exception { StatusRuntimeException ex = assertThrows( StatusRuntimeException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - StreamWriter.newBuilder(TEST_STREAM_1, client).build(); - } + () -> { + StreamWriter.newBuilder(TEST_STREAM_1, client).build(); }); assertEquals(ex.getStatus().getCode(), Status.INVALID_ARGUMENT.getCode()); 
assertTrue(ex.getStatus().getDescription().contains("Writer schema must be provided")); } @Test - public void testInvalidTraceId() throws Exception { + void testInvalidTraceId() throws Exception { assertThrows( IllegalArgumentException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - StreamWriter.newBuilder(TEST_STREAM_1).setTraceId("abc"); - } + () -> { + StreamWriter.newBuilder(TEST_STREAM_1).setTraceId("abc"); }); assertThrows( IllegalArgumentException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - StreamWriter.newBuilder(TEST_STREAM_1).setTraceId("abc:"); - } + () -> { + StreamWriter.newBuilder(TEST_STREAM_1).setTraceId("abc:"); }); assertThrows( IllegalArgumentException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - StreamWriter.newBuilder(TEST_STREAM_1).setTraceId(":abc"); - } + () -> { + StreamWriter.newBuilder(TEST_STREAM_1).setTraceId(":abc"); }); } @Test - public void testEnableConnectionPoolOnExplicitStream() throws Exception { + void testEnableConnectionPoolOnExplicitStream() throws Exception { IllegalArgumentException ex = assertThrows( IllegalArgumentException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - StreamWriter.newBuilder(EXPLICIT_STREAM, client) - .setEnableConnectionPool(true) - .build(); - } + () -> { + StreamWriter.newBuilder(EXPLICIT_STREAM, client) + .setEnableConnectionPool(true) + .build(); }); assertTrue(ex.getMessage().contains("Trying to enable connection pool in non-default stream.")); } @Test - public void testShortenStreamNameAllowed() throws Exception { + void testShortenStreamNameAllowed() throws Exception { // no exception is thrown. 
StreamWriter.newBuilder(TEST_STREAM_SHORTEN, client) .setEnableConnectionPool(true) @@ -658,7 +641,7 @@ public void testShortenStreamNameAllowed() throws Exception { } @Test - public void testAppendSuccessAndConnectionError() throws Exception { + void testAppendSuccessAndConnectionError() throws Exception { StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1, client) .setWriterSchema(createProtoSchema()) @@ -682,7 +665,7 @@ public void testAppendSuccessAndConnectionError() throws Exception { } @Test - public void testAppendSuccessAndInStreamError() throws Exception { + void testAppendSuccessAndInStreamError() throws Exception { StreamWriter writer = getTestStreamWriter(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addResponse( @@ -704,7 +687,7 @@ public void testAppendSuccessAndInStreamError() throws Exception { } @Test - public void testAppendFailedSchemaError() throws Exception { + void testAppendFailedSchemaError() throws Exception { StreamWriter writer = getTestStreamWriter(); StorageError storageError = @@ -736,7 +719,7 @@ public void testAppendFailedSchemaError() throws Exception { } @Test - public void testAppendFailRandomException() throws Exception { + void testAppendFailRandomException() throws Exception { StreamWriter writer = getTestStreamWriter(); // Trigger a non-StatusRuntimeException for append operation (although grpc API should not // return anything other than StatusRuntimeException) @@ -751,7 +734,7 @@ public void testAppendFailRandomException() throws Exception { } @Test - public void longIdleBetweenAppends() throws Exception { + void longIdleBetweenAppends() throws Exception { StreamWriter writer = getTestStreamWriter(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addResponse(createAppendResponse(1)); @@ -769,7 +752,7 @@ public void longIdleBetweenAppends() throws Exception { } @Test - public void testAppendAfterUserClose() throws Exception { + void testAppendAfterUserClose() 
throws Exception { StreamWriter writer = getTestStreamWriter(); testBigQueryWrite.addResponse(createAppendResponse(0)); @@ -785,7 +768,7 @@ public void testAppendAfterUserClose() throws Exception { } @Test - public void testAppendAfterServerClose() throws Exception { + void testAppendAfterServerClose() throws Exception { StreamWriter writer = getTestStreamWriter(); testBigQueryWrite.addException(Status.INVALID_ARGUMENT.asException()); @@ -803,7 +786,7 @@ public void testAppendAfterServerClose() throws Exception { } @Test - public void userCloseWhileRequestInflight() throws Exception { + void userCloseWhileRequestInflight() throws Exception { final StreamWriter writer = getTestStreamWriter(); // Server will sleep 2 seconds before sending back the response. testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(2)); @@ -825,11 +808,8 @@ public void run() { // is being closed. assertThrows( TimeoutException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - appendFuture1.get(1, TimeUnit.SECONDS); - } + () -> { + appendFuture1.get(1, TimeUnit.SECONDS); }); // Within 2 seconds, the request should be done and stream should be closed. @@ -839,7 +819,7 @@ public void run() throws Throwable { } @Test - public void serverCloseWhileRequestsInflight() throws Exception { + void serverCloseWhileRequestsInflight() throws Exception { StreamWriter writer = getTestStreamWriter(); // Server will sleep 2 seconds before closing the connection. 
testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(2)); @@ -866,7 +846,7 @@ public void serverCloseWhileRequestsInflight() throws Exception { } @Test - public void testZeroMaxInflightRequests() throws Exception { + void testZeroMaxInflightRequests() throws Exception { StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1, client) .setWriterSchema(createProtoSchema()) @@ -878,7 +858,7 @@ public void testZeroMaxInflightRequests() throws Exception { } @Test - public void testZeroMaxInflightBytes() throws Exception { + void testZeroMaxInflightBytes() throws Exception { StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1, client) .setWriterSchema(createProtoSchema()) @@ -890,7 +870,7 @@ public void testZeroMaxInflightBytes() throws Exception { } @Test - public void testOneMaxInflightRequests() throws Exception { + void testOneMaxInflightRequests() throws Exception { StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1, client) .setWriterSchema(createProtoSchema()) @@ -907,7 +887,7 @@ public void testOneMaxInflightRequests() throws Exception { } @Test - public void testOneMaxInflightRequests_MultiplexingCase() throws Exception { + void testOneMaxInflightRequests_MultiplexingCase() throws Exception { ConnectionWorkerPool.setOptions(Settings.builder().setMaxConnectionsPerRegion(2).build()); StreamWriter writer1 = StreamWriter.newBuilder(TEST_STREAM_1, client) @@ -942,7 +922,7 @@ public void testOneMaxInflightRequests_MultiplexingCase() throws Exception { } @Test - public void testOpenTelemetryAttributes_MultiplexingCase() throws Exception { + void testOpenTelemetryAttributes_MultiplexingCase() throws Exception { ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); StreamWriter writer1 = @@ -980,7 +960,7 @@ public void testOpenTelemetryAttributes_MultiplexingCase() throws Exception { } @Test - public void testProtoSchemaPiping_nonMultiplexingCase() throws Exception { + 
void testProtoSchemaPiping_nonMultiplexingCase() throws Exception { ProtoSchema protoSchema = createProtoSchema(); StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1, client) @@ -1015,7 +995,7 @@ public void testProtoSchemaPiping_nonMultiplexingCase() throws Exception { } @Test - public void testProtoSchemaPiping_multiplexingCase() throws Exception { + void testProtoSchemaPiping_multiplexingCase() throws Exception { // Use the shared connection mode. ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); @@ -1080,7 +1060,7 @@ public void testProtoSchemaPiping_multiplexingCase() throws Exception { } @Test - public void testMultiplexingWithDifferentStreamAndArrowSchema() throws Exception { + void testMultiplexingWithDifferentStreamAndArrowSchema() throws Exception { // Use the shared connection mode. ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); @@ -1153,7 +1133,7 @@ public void testMultiplexingWithDifferentStreamAndArrowSchema() throws Exception } @Test - public void testFixedCredentialProvider_nullProvider() throws Exception { + void testFixedCredentialProvider_nullProvider() throws Exception { // Use the shared connection mode. ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); @@ -1182,7 +1162,7 @@ public void testFixedCredentialProvider_nullProvider() throws Exception { } @Test - public void testFixedCredentialProvider_twoCredentialsSplitPool() throws Exception { + void testFixedCredentialProvider_twoCredentialsSplitPool() throws Exception { // Use the shared connection mode. 
ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); @@ -1222,7 +1202,7 @@ public void testFixedCredentialProvider_twoCredentialsSplitPool() throws Excepti } @Test - public void testFixedCredentialProvider_twoProviderSameCredentialSharePool() throws Exception { + void testFixedCredentialProvider_twoProviderSameCredentialSharePool() throws Exception { // Use the shared connection mode. ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); @@ -1256,7 +1236,7 @@ public void testFixedCredentialProvider_twoProviderSameCredentialSharePool() thr } @Test - public void testDefaultValueInterpretation_multiplexingCase() throws Exception { + void testDefaultValueInterpretation_multiplexingCase() throws Exception { // Use the shared connection mode. ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(1).build()); @@ -1320,7 +1300,7 @@ public void testDefaultValueInterpretation_multiplexingCase() throws Exception { } @Test - public void testAppendsWithTinyMaxInflightBytes() throws Exception { + void testAppendsWithTinyMaxInflightBytes() throws Exception { StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1, client) .setWriterSchema(createProtoSchema()) @@ -1352,7 +1332,7 @@ public void testAppendsWithTinyMaxInflightBytes() throws Exception { } @Test - public void testAppendsWithTinyMaxInflightBytesThrow() throws Exception { + void testAppendsWithTinyMaxInflightBytesThrow() throws Exception { StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1, client) .setWriterSchema(createProtoSchema()) @@ -1362,11 +1342,8 @@ public void testAppendsWithTinyMaxInflightBytesThrow() throws Exception { Exceptions.InflightBytesLimitExceededException ex = assertThrows( Exceptions.InflightBytesLimitExceededException.class, - new ThrowingRunnable() { - @Override - public void run() throws 
Throwable { - writer.append(createProtoRows(new String[] {String.valueOf(10)}), -1); - } + () -> { + writer.append(createProtoRows(new String[] {String.valueOf(10)}), -1); }); assertEquals(ex.getStatus().getCode(), Status.RESOURCE_EXHAUSTED.getCode()); assertTrue( @@ -1382,20 +1359,17 @@ public void run() throws Throwable { } @Test - public void testLimitBehaviorIgnoreNotAccepted() throws Exception { + void testLimitBehaviorIgnoreNotAccepted() throws Exception { StatusRuntimeException ex = assertThrows( StatusRuntimeException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - StreamWriter writer = - StreamWriter.newBuilder(TEST_STREAM_1, client) - .setWriterSchema(createProtoSchema()) - .setMaxInflightBytes(1) - .setLimitExceededBehavior(FlowController.LimitExceededBehavior.Ignore) - .build(); - } + () -> { + StreamWriter writer = + StreamWriter.newBuilder(TEST_STREAM_1, client) + .setWriterSchema(createProtoSchema()) + .setMaxInflightBytes(1) + .setLimitExceededBehavior(FlowController.LimitExceededBehavior.Ignore) + .build(); }); assertEquals(ex.getStatus().getCode(), Status.INVALID_ARGUMENT.getCode()); assertTrue( @@ -1405,7 +1379,7 @@ public void run() throws Throwable { } @Test - public void testMessageTooLarge() throws Exception { + void testMessageTooLarge() throws Exception { StreamWriter writer = getTestStreamWriter(); // There is an oppotunity to allow 20MB requests. 
@@ -1422,7 +1396,7 @@ public void testMessageTooLarge() throws Exception { } @Test - public void testWrongCompressionType() throws Exception { + void testWrongCompressionType() throws Exception { IllegalArgumentException ex = assertThrows( IllegalArgumentException.class, @@ -1437,7 +1411,7 @@ public void testWrongCompressionType() throws Exception { } @Test - public void testThrowExceptionWhileWithinAppendLoop_MaxWaitTimeExceed() throws Exception { + void testThrowExceptionWhileWithinAppendLoop_MaxWaitTimeExceed() throws Exception { ProtoSchema schema1 = createProtoSchema("foo"); StreamWriter.setMaxRequestCallbackWaitTime(java.time.Duration.ofSeconds(1)); StreamWriter writer = @@ -1474,7 +1448,7 @@ public void testThrowExceptionWhileWithinAppendLoop_MaxWaitTimeExceed() throws E } @Test - public void testAppendWithResetSuccess() throws Exception { + void testAppendWithResetSuccess() throws Exception { try (StreamWriter writer = getTestStreamWriter()) { testBigQueryWrite.setCloseEveryNAppends(113); long appendCount = 10000; @@ -1493,7 +1467,7 @@ public void testAppendWithResetSuccess() throws Exception { } @Test - public void testAppendWithResetNeverSuccess() throws Exception { + void testAppendWithResetNeverSuccess() throws Exception { try (StreamWriter writer = getTestStreamWriter()) { testBigQueryWrite.setCloseForeverAfter(1); long appendCount = 100; @@ -1518,7 +1492,7 @@ public void testAppendWithResetNeverSuccess() throws Exception { } @Test - public void testAppendWithResetNeverSuccessWithMultiplexing() throws Exception { + void testAppendWithResetNeverSuccessWithMultiplexing() throws Exception { try (StreamWriter writer = getMultiplexingTestStreamWriter()) { testBigQueryWrite.setCloseForeverAfter(1); long appendCount = 100; @@ -1545,7 +1519,7 @@ public void testAppendWithResetNeverSuccessWithMultiplexing() throws Exception { // This test is setup for the server to force a retry after all records are sent. 
Ensure the // records are resent, even if no new records are appeneded. @Test - public void testRetryAfterAllRecordsInflight() throws Exception { + void testRetryAfterAllRecordsInflight() throws Exception { try (StreamWriter writer = getTestStreamWriter()) { testBigQueryWrite.setCloseEveryNAppends(2); testBigQueryWrite.setTimesToClose(1); @@ -1561,7 +1535,7 @@ public void testRetryAfterAllRecordsInflight() throws Exception { } @Test - public void testWriterClosedStream() throws Exception { + void testWriterClosedStream() throws Exception { try (StreamWriter writer = getTestStreamWriter()) { // Writer is closed without any traffic. TimeUnit.SECONDS.sleep(1); @@ -1569,7 +1543,7 @@ public void testWriterClosedStream() throws Exception { } @Test - public void testWriterAlreadyClosedException() throws Exception { + void testWriterAlreadyClosedException() throws Exception { StreamWriter writer = getTestStreamWriter(); writer.close(); ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}, 0); @@ -1584,7 +1558,7 @@ public void testWriterAlreadyClosedException() throws Exception { } @Test - public void testWriterClosedException() throws Exception { + void testWriterClosedException() throws Exception { StreamWriter writer = getTestStreamWriter(); testBigQueryWrite.addException(Status.INTERNAL.asException()); ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}, 0); @@ -1604,28 +1578,28 @@ public void testWriterClosedException() throws Exception { } @Test - public void testWriterId() + void testWriterId() throws Descriptors.DescriptorValidationException, IOException, InterruptedException { StreamWriter writer1 = getTestStreamWriter(); - Assert.assertFalse(writer1.getWriterId().isEmpty()); + assertFalse(writer1.getWriterId().isEmpty()); StreamWriter writer2 = getTestStreamWriter(); - Assert.assertFalse(writer2.getWriterId().isEmpty()); - Assert.assertNotEquals(writer1.getWriterId(), writer2.getWriterId()); + 
assertFalse(writer2.getWriterId().isEmpty()); + assertNotEquals(writer1.getWriterId(), writer2.getWriterId()); } @Test - public void testInitialization_operationKind() throws Exception { + void testInitialization_operationKind() throws Exception { try (StreamWriter streamWriter = getMultiplexingTestStreamWriter()) { - Assert.assertEquals(streamWriter.getConnectionOperationType(), Kind.CONNECTION_WORKER_POOL); + assertEquals(streamWriter.getConnectionOperationType(), Kind.CONNECTION_WORKER_POOL); } try (StreamWriter streamWriter = getTestStreamWriter()) { - Assert.assertEquals(streamWriter.getConnectionOperationType(), Kind.CONNECTION_WORKER); + assertEquals(streamWriter.getConnectionOperationType(), Kind.CONNECTION_WORKER); } } @Test - public void testExtractDatasetName() throws Exception { - Assert.assertEquals( + void testExtractDatasetName() throws Exception { + assertEquals( StreamWriter.extractDatasetAndProjectName( "projects/project1/datasets/dataset2/tables/something"), "projects/project1/datasets/dataset2/"); @@ -1637,11 +1611,11 @@ public void testExtractDatasetName() throws Exception { StreamWriter.extractDatasetAndProjectName( "wrong/projects/project1/wrong/datasets/dataset2/tables/something"); }); - Assert.assertTrue(ex.getMessage().contains("The passed in stream name does not match")); + assertTrue(ex.getMessage().contains("The passed in stream name does not match")); } @Test - public void testRetryInUnrecoverableStatus_MultiplexingCase() throws Exception { + void testRetryInUnrecoverableStatus_MultiplexingCase() throws Exception { ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(4).build()); ConnectionWorkerPool.enableTestingLogic(); @@ -1700,7 +1674,7 @@ public void testRetryInUnrecoverableStatus_MultiplexingCase() throws Exception { } @Test - public void testCloseWhileInUnrecoverableState() throws Exception { + void testCloseWhileInUnrecoverableState() throws Exception { 
ConnectionWorkerPool.setOptions( Settings.builder().setMinConnectionsPerRegion(1).setMaxConnectionsPerRegion(4).build()); ConnectionWorkerPool.enableTestingLogic(); @@ -1751,8 +1725,8 @@ public StreamWriter getMultiplexingStreamWriter(String streamName) throws IOExce } // Timeout to ensure close() doesn't wait for done callback timeout. - @Test(timeout = 10000) - public void testCloseDisconnectedStream() throws Exception { + @Test @org.junit.jupiter.api.Timeout(value = 10000, unit = TimeUnit.MILLISECONDS) + void testCloseDisconnectedStream() throws Exception { StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1) .setCredentialsProvider(NoCredentialsProvider.create()) @@ -1769,7 +1743,7 @@ public void testCloseDisconnectedStream() throws Exception { } @Test - public void testSetAndGetMissingValueInterpretationMap() throws Exception { + void testSetAndGetMissingValueInterpretationMap() throws Exception { StreamWriter.Builder writerBuilder = getTestStreamWriterBuilder(); Map missingValueMap = new HashMap(); missingValueMap.put("col1", AppendRowsRequest.MissingValueInterpretation.NULL_VALUE); @@ -1780,7 +1754,7 @@ public void testSetAndGetMissingValueInterpretationMap() throws Exception { } @Test - public void testAppendWithoutMissingValueMap() throws Exception { + void testAppendWithoutMissingValueMap() throws Exception { try (StreamWriter writer = getTestStreamWriter()) { testBigQueryWrite.addResponse(createAppendResponse(0)); @@ -1797,7 +1771,7 @@ public void testAppendWithoutMissingValueMap() throws Exception { } @Test - public void testAppendWithMissingValueMap() throws Exception { + void testAppendWithMissingValueMap() throws Exception { Map missingValueMap = new HashMap(); missingValueMap.put("col1", AppendRowsRequest.MissingValueInterpretation.NULL_VALUE); missingValueMap.put("col3", AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE); @@ -1820,8 +1794,8 @@ public void testAppendWithMissingValueMap() throws Exception { } } - @Test(timeout = 10000) - public void
testStreamWriterUserCloseMultiplexing() throws Exception { + @Test @org.junit.jupiter.api.Timeout(value = 10000, unit = TimeUnit.MILLISECONDS) + void testStreamWriterUserCloseMultiplexing() throws Exception { StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1, client) .setWriterSchema(createProtoSchema()) @@ -1844,8 +1818,8 @@ public void testStreamWriterUserCloseMultiplexing() throws Exception { assertTrue(writer.isUserClosed()); } - @Test(timeout = 10000) - public void testStreamWriterUserCloseNoMultiplexing() throws Exception { + @Test @org.junit.jupiter.api.Timeout(value = 10000, unit = TimeUnit.MILLISECONDS) + void testStreamWriterUserCloseNoMultiplexing() throws Exception { StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1, client).setWriterSchema(createProtoSchema()).build(); @@ -1864,8 +1838,8 @@ public void testStreamWriterUserCloseNoMultiplexing() throws Exception { assertTrue(writer.isUserClosed()); } - @Test(timeout = 10000) - public void testStreamWriterPermanentErrorMultiplexing() throws Exception { + @Test @org.junit.jupiter.api.Timeout(value = 10000, unit = TimeUnit.MILLISECONDS) + void testStreamWriterPermanentErrorMultiplexing() throws Exception { StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1, client) .setWriterSchema(createProtoSchema()) @@ -1890,8 +1864,8 @@ public void testStreamWriterPermanentErrorMultiplexing() throws Exception { assertFalse(writer.isUserClosed()); } - @Test(timeout = 10000) - public void testStreamWriterPermanentErrorNoMultiplexing() throws Exception { + @Test @org.junit.jupiter.api.Timeout(value = 10000, unit = TimeUnit.MILLISECONDS) + void testStreamWriterPermanentErrorNoMultiplexing() throws Exception { StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM_1, client).setWriterSchema(createProtoSchema()).build(); testBigQueryWrite.setCloseForeverAfter(1); @@ -1912,8 +1886,8 @@ public void testStreamWriterPermanentErrorNoMultiplexing() throws Exception { assertFalse(writer.isUserClosed()); } - @Test(timeout = 10000) - public void testBuilderDefaultSetting() throws Exception { + @Test @org.junit.jupiter.api.Timeout(value = 10000, unit = TimeUnit.MILLISECONDS) + void testBuilderDefaultSetting() throws
Exception { StreamWriter.Builder writerBuilder = StreamWriter.newBuilder(TEST_STREAM_1); BigQueryWriteSettings writeSettings = StreamWriter.getBigQueryWriteSettings(writerBuilder); assertEquals( @@ -1939,8 +1913,8 @@ public void testBuilderDefaultSetting() throws Exception { BigQueryWriteSettings.getDefaultEndpoint(), writeSettings.getEndpoint().toString()); } - @Test(timeout = 10000) - public void testBuilderExplicitSetting() throws Exception { + @Test @org.junit.jupiter.api.Timeout(value = 10000, unit = TimeUnit.MILLISECONDS) + void testBuilderExplicitSetting() throws Exception { // Client has special seetings. BigQueryWriteSettings clientSettings = BigQueryWriteSettings.newBuilder() @@ -1996,10 +1970,13 @@ public void testBuilderExplicitSetting() throws Exception { ((GoogleCredentialsProvider) writerSettings2.getCredentialsProvider()) .getScopesToApply() .size()); + + client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); } @Test - public void testAppendSuccessAndInternalErrorRetrySuccess() throws Exception { + void testAppendSuccessAndInternalErrorRetrySuccess() throws Exception { StreamWriter writer = getTestStreamWriterRetryEnabled(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addStatusException( @@ -2022,7 +1999,7 @@ public void testAppendSuccessAndInternalErrorRetrySuccess() throws Exception { } @Test - public void testAppendSuccessAndInternalQuotaErrorRetrySuccess() throws Exception { + void testAppendSuccessAndInternalQuotaErrorRetrySuccess() throws Exception { StreamWriter writer = getTestStreamWriterRetryEnabled(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addStatusException( @@ -2046,7 +2023,7 @@ public void testAppendSuccessAndInternalQuotaErrorRetrySuccess() throws Exceptio /* temporarily disable test as static variable is interfering with other tests @Test - public void testInternalQuotaError_MaxWaitTimeExceed_RetrySuccess() throws Exception { + void testInternalQuotaError_MaxWaitTimeExceed_RetrySuccess() throws
Exception { // In order for the test to succeed, the given request must complete successfully even after all // the retries. The fake server is configured to fail 3 times with a quota error. This means the // client will perform retry with exponential backoff. The fake server injects 1 second of delay @@ -2074,7 +2051,7 @@ public void testInternalQuotaError_MaxWaitTimeExceed_RetrySuccess() throws Excep */ @Test - public void testAppendSuccessAndInternalErrorRetrySuccessExclusive() throws Exception { + void testAppendSuccessAndInternalErrorRetrySuccessExclusive() throws Exception { // Ensure we return an error from the fake server when a retry is in progress testBigQueryWrite.setReturnErrorDuringExclusiveStreamRetry(true); // Ensure messages will be in the inflight queue @@ -2103,7 +2080,7 @@ public void testAppendSuccessAndInternalErrorRetrySuccessExclusive() throws Exce } @Test - public void testAppendSuccessAndInternalErrorRetryNoOffsetSuccessExclusive() throws Exception { + void testAppendSuccessAndInternalErrorRetryNoOffsetSuccessExclusive() throws Exception { StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addStatusException( @@ -2122,7 +2099,7 @@ public void testAppendSuccessAndInternalErrorRetryNoOffsetSuccessExclusive() thr } @Test - public void testAppendSuccessAndQuotaErrorRetryNoOffsetSuccessExclusive() throws Exception { + void testAppendSuccessAndQuotaErrorRetryNoOffsetSuccessExclusive() throws Exception { StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addStatusException( @@ -2141,7 +2118,7 @@ public void testAppendSuccessAndQuotaErrorRetryNoOffsetSuccessExclusive() throws } @Test - public void testExclusiveAppendSuccessAndInternalErrorRetrySuccess() throws Exception { + void testExclusiveAppendSuccessAndInternalErrorRetrySuccess() throws Exception { // Ensure we return 
an error from the fake server when a retry is in progress testBigQueryWrite.setReturnErrorDuringExclusiveStreamRetry(true); // Ensure messages will be in the inflight queue @@ -2173,7 +2150,7 @@ public void testExclusiveAppendSuccessAndInternalErrorRetrySuccess() throws Exce } @Test - public void testExclusiveAppendSuccessAndQuotaErrorRetrySuccess() throws Exception { + void testExclusiveAppendSuccessAndQuotaErrorRetrySuccess() throws Exception { // Ensure we return an error from the fake server when a retry is in progress testBigQueryWrite.setReturnErrorDuringExclusiveStreamRetry(true); // Ensure messages will be in the inflight queue @@ -2207,7 +2184,7 @@ public void testExclusiveAppendSuccessAndQuotaErrorRetrySuccess() throws Excepti } @Test - public void testAppendSuccessAndQuotaErrorRetrySuccessExclusive() throws Exception { + void testAppendSuccessAndQuotaErrorRetrySuccessExclusive() throws Exception { StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addResponse( @@ -2228,7 +2205,7 @@ public void testAppendSuccessAndQuotaErrorRetrySuccessExclusive() throws Excepti } @Test - public void testAppendSuccessWithArrowSerializedData() throws Exception { + void testAppendSuccessWithArrowSerializedData() throws Exception { StreamWriter writer = getTestStreamWriterExclusiveRetryEnabledWithArrowSchema(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addResponse(createAppendResponse(1)); @@ -2245,7 +2222,7 @@ public void testAppendSuccessWithArrowSerializedData() throws Exception { } @Test - public void testAppendSuccessWithUnserializedArrowRecordBatch() throws Exception { + void testAppendSuccessWithUnserializedArrowRecordBatch() throws Exception { StreamWriter writer = getTestStreamWriterExclusiveRetryEnabledWithUnserialiedArrowSchema(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addResponse(createAppendResponse(1)); @@ 
-2282,7 +2259,7 @@ public void testAppendSuccessWithUnserializedArrowRecordBatch() throws Exception } @Test - public void testAppendSuccessAndInternalErrorMaxRetryNumAttempts() throws Exception { + void testAppendSuccessAndInternalErrorMaxRetryNumAttempts() throws Exception { StreamWriter writer = getTestStreamWriterRetryEnabled(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addStatusException( @@ -2312,7 +2289,7 @@ public void testAppendSuccessAndInternalErrorMaxRetryNumAttempts() throws Except } @Test - public void testAppendSuccessAndQuotaErrorMaxRetryNumAttempts() throws Exception { + void testAppendSuccessAndQuotaErrorMaxRetryNumAttempts() throws Exception { StreamWriter writer = getTestStreamWriterRetryEnabled(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addStatusException( @@ -2343,7 +2320,7 @@ public void testAppendSuccessAndQuotaErrorMaxRetryNumAttempts() throws Exception } @Test - public void testExclusiveAppendSuccessAndInternalErrorRetryMaxRetry() throws Exception { + void testExclusiveAppendSuccessAndInternalErrorRetryMaxRetry() throws Exception { testBigQueryWrite.setReturnErrorDuringExclusiveStreamRetry(true); // Ensure messages will be in the inflight queue testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(1)); @@ -2379,7 +2356,7 @@ public void testExclusiveAppendSuccessAndInternalErrorRetryMaxRetry() throws Exc } @Test - public void testExclusiveAppendSuccessAndQuotaErrorRetryMaxRetry() throws Exception { + void testExclusiveAppendSuccessAndQuotaErrorRetryMaxRetry() throws Exception { testBigQueryWrite.setReturnErrorDuringExclusiveStreamRetry(true); // Ensure messages will be in the inflight queue testBigQueryWrite.setResponseSleep(java.time.Duration.ofSeconds(1)); @@ -2416,7 +2393,7 @@ public void testExclusiveAppendSuccessAndQuotaErrorRetryMaxRetry() throws Except } @Test - public void testExclusiveAppendQuotaErrorRetryExponentialBackoff() throws Exception { + void 
testExclusiveAppendQuotaErrorRetryExponentialBackoff() throws Exception { testBigQueryWrite.setReturnErrorDuringExclusiveStreamRetry(true); StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled(); @@ -2456,7 +2433,7 @@ public void testExclusiveAppendQuotaErrorRetryExponentialBackoff() throws Except } @Test - public void testAppendInternalErrorRetryExponentialBackoff() throws Exception { + void testAppendInternalErrorRetryExponentialBackoff() throws Exception { StreamWriter writer = getTestStreamWriterRetryEnabled(); testBigQueryWrite.addResponse( @@ -2494,7 +2471,7 @@ public void testAppendInternalErrorRetryExponentialBackoff() throws Exception { } @Test - public void testAppendSuccessAndNonRetryableError() throws Exception { + void testAppendSuccessAndNonRetryableError() throws Exception { StreamWriter writer = getTestStreamWriterRetryEnabled(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addStatusException( @@ -2519,7 +2496,7 @@ public void testAppendSuccessAndNonRetryableError() throws Exception { } @Test - public void testExclusiveAppendSuccessAndNonRetryableError() throws Exception { + void testExclusiveAppendSuccessAndNonRetryableError() throws Exception { StreamWriter writer = getTestStreamWriterExclusiveRetryEnabled(); testBigQueryWrite.addResponse(createAppendResponse(0)); testBigQueryWrite.addStatusException( @@ -2544,7 +2521,7 @@ public void testExclusiveAppendSuccessAndNonRetryableError() throws Exception { } @Test - public void testGetDefaultStreamName() { + void testGetDefaultStreamName() { TableName tableName = TableName.of("projectId", "datasetId", "tableId"); String actualDefaultName = StreamWriter.getDefaultStreamName(tableName); @@ -2554,7 +2531,7 @@ public void testGetDefaultStreamName() { } @Test - public void testLocationCacheIsHit() throws Exception { + void testLocationCacheIsHit() throws Exception { WriteStream expectedResponse = WriteStream.newBuilder() .setName(WriteStreamName.of("[PROJECT]", 
"[DATASET]", "[TABLE]", "[STREAM]").toString()) @@ -2582,7 +2559,7 @@ public void testLocationCacheIsHit() throws Exception { } @Test - public void testLocationCacheExpires() throws Exception { + void testLocationCacheExpires() throws Exception { // force cache to expire in 1000 millis StreamWriter.recreateProjectLocationCache(1000); WriteStream expectedResponse = diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryBigDecimalByteStringEncoderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryBigDecimalByteStringEncoderTest.java index cd2195d066..641531bb02 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryBigDecimalByteStringEncoderTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryBigDecimalByteStringEncoderTest.java @@ -16,7 +16,7 @@ package com.google.cloud.bigquery.storage.v1.it; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import com.google.api.core.ApiFuture; import com.google.cloud.ServiceOptions; @@ -44,14 +44,15 @@ import java.math.BigDecimal; import java.util.Iterator; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.logging.Logger; import org.json.JSONArray; import org.json.JSONObject; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; -public class ITBigQueryBigDecimalByteStringEncoderTest { +class ITBigQueryBigDecimalByteStringEncoderTest { private static final Logger LOG = Logger.getLogger(ITBigQueryBigDecimalByteStringEncoderTest.class.getName()); private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); @@ -62,8 +63,8 @@ public class 
ITBigQueryBigDecimalByteStringEncoderTest { private static TableInfo tableInfo; private static BigQuery bigquery; - @BeforeClass - public static void beforeClass() throws IOException { + @BeforeAll + static void beforeAll() throws IOException { client = BigQueryWriteClient.create(); RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); @@ -90,10 +91,11 @@ public static void beforeClass() throws IOException { bigquery.create(tableInfo); } - @AfterClass - public static void afterClass() { + @AfterAll + static void afterAll() throws InterruptedException { if (client != null) { client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); } if (bigquery != null) { RemoteBigQueryHelper.forceDelete(bigquery, DATASET); @@ -101,7 +103,7 @@ public static void afterClass() { } @Test - public void TestBigDecimalEncoding() + void TestBigDecimalEncoding() throws IOException, InterruptedException, ExecutionException, diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageLongRunningTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageLongRunningTest.java index 9ca508bac4..01c44caa9b 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageLongRunningTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageLongRunningTest.java @@ -16,7 +16,7 @@ package com.google.cloud.bigquery.storage.v1.it; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import com.google.api.gax.rpc.ServerStream; import com.google.cloud.ServiceOptions; @@ -26,6 +26,7 @@ import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; import com.google.cloud.bigquery.storage.v1.ReadSession; import com.google.cloud.bigquery.storage.v1.ReadStream; +import 
com.google.cloud.bigquery.storage.v1.it.util.BigQueryResource; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -34,17 +35,18 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import java.util.logging.Logger; -import org.junit.AfterClass; -import org.junit.Assume; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** * Integration tests for BigQuery Storage API which target long running sessions. These tests can be * enabled by setting the system property 'bigquery.storage.enable_long_running_tests' to true. */ -public class ITBigQueryStorageLongRunningTest { +class ITBigQueryStorageLongRunningTest { private static final Logger LOG = Logger.getLogger(ITBigQueryStorageLongRunningTest.class.getName()); @@ -61,9 +63,10 @@ public class ITBigQueryStorageLongRunningTest { private static BigQueryReadClient client; private static String parentProjectId; - @BeforeClass - public static void beforeClass() throws IOException { - Assume.assumeTrue(LONG_TESTS_DISABLED_MESSAGE, Boolean.getBoolean(LONG_TESTS_ENABLED_PROPERTY)); + @BeforeAll + static void beforeAll() throws IOException { + Assumptions.assumeTrue( + Boolean.getBoolean(LONG_TESTS_ENABLED_PROPERTY), LONG_TESTS_DISABLED_MESSAGE); client = BigQueryReadClient.create(); parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId()); @@ -73,20 +76,21 @@ public static void beforeClass() throws IOException { ITBigQueryStorageLongRunningTest.class.getSimpleName(), parentProjectId)); } - @AfterClass - public static void afterClass() { + @AfterAll + static void afterAll() throws InterruptedException { if (client != null) { client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); } } @Test - 
public void testLongRunningReadSession() throws InterruptedException, ExecutionException { + void testLongRunningReadSession() throws InterruptedException, ExecutionException { // This test reads a larger table with the goal of doing a simple validation of timeout settings // for a longer running session. String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ "bigquery-public-data", /* datasetId= */ "samples", /* tableId= */ "wikipedia"); @@ -101,26 +105,21 @@ public void testLongRunningReadSession() throws InterruptedException, ExecutionE /* maxStreamCount= */ 5); assertEquals( + 5, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 5, - session.getStreamsCount()); + table, session.toString())); List> tasks = new ArrayList<>(session.getStreamsCount()); for (final ReadStream stream : session.getStreamsList()) { - tasks.add( - new Callable() { - @Override - public Long call() throws Exception { - return readAllRowsFromStream(stream); - } - }); + tasks.add(() -> readAllRowsFromStream(stream)); } ExecutorService executor = Executors.newFixedThreadPool(tasks.size()); List> results = executor.invokeAll(tasks); + executor.shutdown(); long rowCount = 0; for (Future result : results) { diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageReadClientTest.java similarity index 70% rename from google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageTest.java rename to google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageReadClientTest.java index a8f7b0b8bc..98ab9edb5b 100644 --- 
a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageReadClientTest.java @@ -16,24 +16,24 @@ package com.google.cloud.bigquery.storage.v1.it; +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_COLUMN_NAME; +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME; import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.Truth.assertWithMessage; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import com.google.api.core.ApiFuture; -import com.google.api.core.ApiFutureCallback; import com.google.api.core.ApiFutures; import com.google.api.gax.core.FixedCredentialsProvider; import com.google.api.gax.core.InstantiatingExecutorProvider; import com.google.api.gax.retrying.RetrySettings; import com.google.api.gax.rpc.ServerStream; import com.google.api.gax.rpc.UnauthenticatedException; -import com.google.auth.oauth2.ServiceAccountCredentials; import com.google.cloud.RetryOption; import com.google.cloud.ServiceOptions; import com.google.cloud.bigquery.BigQuery; @@ -54,6 +54,7 @@ import com.google.cloud.bigquery.TableInfo; import com.google.cloud.bigquery.TimePartitioning; import 
com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions; import com.google.cloud.bigquery.storage.v1.BigQueryReadClient; import com.google.cloud.bigquery.storage.v1.BigQueryReadSettings; import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest; @@ -62,20 +63,23 @@ import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; import com.google.cloud.bigquery.storage.v1.ReadSession; -import com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers; import com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions; import com.google.cloud.bigquery.storage.v1.ReadStream; import com.google.cloud.bigquery.storage.v1.TableFieldSchema; import com.google.cloud.bigquery.storage.v1.TableName; import com.google.cloud.bigquery.storage.v1.TableSchema; -import com.google.cloud.bigquery.storage.v1.it.SimpleRowReaderArrow.ArrowRangeBatchConsumer; -import com.google.cloud.bigquery.storage.v1.it.SimpleRowReaderAvro.AvroRowConsumer; +import com.google.cloud.bigquery.storage.v1.it.util.BigQueryResource; +import com.google.cloud.bigquery.storage.v1.it.util.Helper; +import com.google.cloud.bigquery.storage.v1.it.util.SimpleRowReaderArrow; +import com.google.cloud.bigquery.storage.v1.it.util.SimpleRowReaderArrow.ArrowRangeBatchConsumer; +import com.google.cloud.bigquery.storage.v1.it.util.SimpleRowReaderAvro; +import com.google.cloud.bigquery.storage.v1.it.util.SimpleRowReaderAvro.AvroRowConsumer; import com.google.cloud.bigquery.testing.RemoteBigQueryHelper; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.util.concurrent.MoreExecutors; import com.google.protobuf.Descriptors.DescriptorValidationException; -import com.google.protobuf.Timestamp; +import com.google.protobuf.Int64Value; import io.opentelemetry.api.OpenTelemetry; import 
io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.sdk.OpenTelemetrySdk; @@ -84,9 +88,7 @@ import io.opentelemetry.sdk.trace.data.SpanData; import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; import io.opentelemetry.sdk.trace.samplers.Sampler; -import java.io.ByteArrayInputStream; import java.io.IOException; -import java.io.InputStream; import java.math.BigDecimal; import java.nio.ByteBuffer; import java.time.Duration; @@ -101,29 +103,35 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.logging.Logger; +import java.util.stream.Collectors; import org.apache.avro.Conversions; import org.apache.avro.LogicalTypes; import org.apache.avro.Schema; import org.apache.avro.generic.GenericData; -import org.apache.avro.generic.GenericRecordBuilder; import org.apache.avro.util.Utf8; import org.json.JSONArray; import org.json.JSONObject; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** Integration tests for BigQuery Storage API. 
*/ -public class ITBigQueryStorageTest { - private static final Logger LOG = Logger.getLogger(ITBigQueryStorageTest.class.getName()); +class ITBigQueryStorageReadClientTest { + private static final Logger LOG = + Logger.getLogger(ITBigQueryStorageReadClientTest.class.getName()); private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); private static final String DESCRIPTION = "BigQuery Storage Java client test dataset"; + private static final String BQSTORAGE_TIMESTAMP_READ_TABLE = "bqstorage_timestamp_read"; + private static final int SHAKESPEARE_SAMPLE_ROW_COUNT = 164_656; + private static final int SHAKESPEARE_SAMPELS_ROWS_MORE_THAN_100_WORDS = 1_333; + private static final int MAX_STREAM_COUNT = 1; - private static BigQueryReadClient client; + private static BigQueryReadClient readClient; private static String projectName; private static String parentProjectId; private static BigQuery bigquery; @@ -269,7 +277,7 @@ public class ITBigQueryStorageTest { .build()) .build(); - private static final ImmutableMap RANGE_TEST_VALUES_DATES = + private static final Map RANGE_TEST_VALUES_DATES = new ImmutableMap.Builder() .put( "bounded", @@ -302,7 +310,7 @@ public class ITBigQueryStorageTest { .build(); // dates are returned as days since epoch - private static final ImmutableMap RANGE_TEST_VALUES_EXPECTED_DATES = + private static final Map RANGE_TEST_VALUES_EXPECTED_DATES = new ImmutableMap.Builder() .put( "bounded", @@ -494,16 +502,16 @@ public CompletableResultCode shutdown() { } } - @BeforeClass - public static void beforeClass() throws IOException { - client = BigQueryReadClient.create(); + @BeforeAll + static void beforeAll() throws IOException, DescriptorValidationException, InterruptedException { + readClient = BigQueryReadClient.create(); projectName = ServiceOptions.getDefaultProjectId(); parentProjectId = String.format("projects/%s", projectName); LOG.info( String.format( "%s tests running with parent project: %s", - 
ITBigQueryStorageTest.class.getSimpleName(), parentProjectId)); + ITBigQueryStorageReadClientTest.class.getSimpleName(), parentProjectId)); RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); bigquery = bigqueryHelper.getOptions().getService(); @@ -513,12 +521,71 @@ public static void beforeClass() throws IOException { .build(); bigquery.create(datasetInfo); LOG.info("Created test dataset: " + DATASET); + + setupTimestampTable(); + } + + private static void setupTimestampTable() + throws DescriptorValidationException, IOException, InterruptedException { + // Schema to create a BQ table + com.google.cloud.bigquery.Schema timestampSchema = + com.google.cloud.bigquery.Schema.of( + Field.newBuilder(TIMESTAMP_COLUMN_NAME, StandardSQLTypeName.TIMESTAMP) + .setMode(Mode.NULLABLE) + .build(), + Field.newBuilder(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME, StandardSQLTypeName.TIMESTAMP) + .setTimestampPrecision(Helper.PICOSECOND_PRECISION) + .setMode(Mode.NULLABLE) + .build()); + + // Create BQ table with timestamps + TableId tableId = TableId.of(DATASET, BQSTORAGE_TIMESTAMP_READ_TABLE); + bigquery.create(TableInfo.of(tableId, StandardTableDefinition.of(timestampSchema))); + + TableName parentTable = TableName.of(projectName, DATASET, BQSTORAGE_TIMESTAMP_READ_TABLE); + + // Define the BQStorage schema to write to + TableSchema timestampTableSchema = + TableSchema.newBuilder() + .addFields( + TableFieldSchema.newBuilder() + .setName(TIMESTAMP_COLUMN_NAME) + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .addFields( + TableFieldSchema.newBuilder() + .setName(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME) + .setTimestampPrecision( + Int64Value.newBuilder().setValue(Helper.PICOSECOND_PRECISION).build()) + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build()) + .build(); + + try (JsonStreamWriter writer = + JsonStreamWriter.newBuilder(parentTable.toString(), 
timestampTableSchema).build()) { + JSONArray data = new JSONArray(); + for (Object[] timestampData : Helper.INPUT_TIMESTAMPS) { + JSONObject row = new JSONObject(); + row.put(TIMESTAMP_COLUMN_NAME, timestampData[0]); + row.put(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME, timestampData[1]); + data.put(row); + } + + ApiFuture future = writer.append(data); + // The append method is asynchronous. Rather than waiting for the method to complete, + // which can hurt performance, register a completion callback and continue streaming. + ApiFutures.addCallback( + future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor()); + } } - @AfterClass - public static void afterClass() { - if (client != null) { - client.close(); + @AfterAll + static void afterAll() throws InterruptedException { + if (readClient != null) { + readClient.close(); + readClient.awaitTermination(10, TimeUnit.SECONDS); } if (bigquery != null) { @@ -528,15 +595,15 @@ public static void afterClass() { } @Test - public void testSimpleReadAvro() { + void testSimpleReadAvro() { String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ "bigquery-public-data", /* datasetId= */ "samples", /* tableId= */ "shakespeare"); ReadSession session = - client.createReadSession( + readClient.createReadSession( /* parent= */ parentProjectId, /* readSession= */ ReadSession.newBuilder() .setTable(table) @@ -544,35 +611,35 @@ public void testSimpleReadAvro() { .build(), /* maxStreamCount= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); ReadRowsRequest readRowsRequest = ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); long rowCount = 0; - ServerStream stream = client.readRowsCallable().call(readRowsRequest); + 
ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); for (ReadRowsResponse response : stream) { rowCount += response.getRowCount(); } - assertEquals(164_656, rowCount); + assertEquals(SHAKESPEARE_SAMPLE_ROW_COUNT, rowCount); } @Test - public void testSimpleReadArrow() { + void testSimpleReadArrow() { String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ "bigquery-public-data", /* datasetId= */ "samples", /* tableId= */ "shakespeare"); ReadSession session = - client.createReadSession( + readClient.createReadSession( /* parent= */ parentProjectId, /* readSession= */ ReadSession.newBuilder() .setTable(table) @@ -580,12 +647,12 @@ public void testSimpleReadArrow() { .build(), /* maxStreamCount= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); // Assert that there are streams available in the session. An empty table may not have // data available. If no sessions are available for an anonymous (cached) table, consider @@ -601,18 +668,18 @@ public void testSimpleReadArrow() { long rowCount = 0; // Process each block of rows as they arrive and decode using our simple row reader. - ServerStream stream = client.readRowsCallable().call(readRowsRequest); + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); for (ReadRowsResponse response : stream) { Preconditions.checkState(response.hasArrowRecordBatch()); rowCount += response.getRowCount(); } - assertEquals(164_656, rowCount); + assertEquals(SHAKESPEARE_SAMPLE_ROW_COUNT, rowCount); } @Test - public void testRangeTypeSimple() throws InterruptedException { + void testRangeTypeSimple() throws InterruptedException { // Create table with Range values. 
- String tableName = "test_range_type_read"; + String tableName = "test_range_type_read" + UUID.randomUUID().toString().substring(0, 8); TableId tableId = TableId.of(DATASET, tableName); QueryJobConfiguration createTable = QueryJobConfiguration.newBuilder( @@ -630,13 +697,13 @@ public void testRangeTypeSimple() throws InterruptedException { bigquery.query(createTable); String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ ServiceOptions.getDefaultProjectId(), /* datasetId= */ DATASET, /* tableId= */ tableId.getTable()); ReadSession session = - client.createReadSession( + readClient.createReadSession( /* parent= */ parentProjectId, /* readSession= */ ReadSession.newBuilder() .setTable(table) @@ -644,12 +711,12 @@ public void testRangeTypeSimple() throws InterruptedException { .build(), /* maxStreamCount= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); // Assert that there are streams available in the session. An empty table may not have // data available. 
If no sessions are available for an anonymous (cached) table, consider @@ -664,7 +731,7 @@ public void testRangeTypeSimple() throws InterruptedException { ReadRowsRequest.newBuilder().setReadStream(streamName).build(); long rowCount = 0; - ServerStream stream = client.readRowsCallable().call(readRowsRequest); + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); for (ReadRowsResponse response : stream) { Preconditions.checkState(response.hasArrowRecordBatch()); rowCount += response.getRowCount(); @@ -673,10 +740,10 @@ public void testRangeTypeSimple() throws InterruptedException { } @Test - public void testRangeTypeWrite() + void testRangeTypeWrite() throws InterruptedException, IOException, DescriptorValidationException { - // Create table with Range fields. - String tableName = "test_range_type_write"; + // Create table with Range values. + String tableName = "test_range_type_write" + UUID.randomUUID().toString().substring(0, 8); TableId tableId = TableId.of(DATASET, tableName); bigquery.create(TableInfo.of(tableId, StandardTableDefinition.of(RANGE_SCHEMA))); @@ -735,16 +802,17 @@ public void testRangeTypeWrite() ApiFuture future = writer.append(data); // The append method is asynchronous. Rather than waiting for the method to complete, // which can hurt performance, register a completion callback and continue streaming. 
- ApiFutures.addCallback(future, new AppendCompleteCallback(), MoreExecutors.directExecutor()); + ApiFutures.addCallback( + future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor()); } String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ projectName, /* datasetId= */ DATASET, /* tableId= */ tableId.getTable()); ReadSession session = - client.createReadSession( + readClient.createReadSession( /* parent= */ parentProjectId, /* readSession= */ ReadSession.newBuilder() .setTable(table) @@ -752,13 +820,12 @@ public void testRangeTypeWrite() .build(), /* maxStreamCount= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); - + table, session.toString())); // Assert that there are streams available in the session. An empty table may not have // data available. If no sessions are available for an anonymous (cached) table, consider // writing results of a query to a named table rather than consuming cached results @@ -782,7 +849,7 @@ public void testRangeTypeWrite() long rowCount = 0; // Process each block of rows as they arrive and decode using our simple row reader. 
- ServerStream stream = client.readRowsCallable().call(readRowsRequest); + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); for (ReadRowsResponse response : stream) { Preconditions.checkState(response.hasArrowRecordBatch()); reader.processRows( @@ -797,16 +864,98 @@ public void testRangeTypeWrite() } } + // Tests that inputs for micros and picos can be read properly via Arrow + @Test + void timestamp_readArrow() throws IOException { + String table = + BigQueryResource.formatTableResource(projectName, DATASET, BQSTORAGE_TIMESTAMP_READ_TABLE); + ReadSession session = + readClient.createReadSession( + parentProjectId, + ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.ARROW) + .setReadOptions( + TableReadOptions.newBuilder() + .setArrowSerializationOptions( + ArrowSerializationOptions.newBuilder() + // This serialization option only impacts columns that are type + // `TIMESTAMP_PICOS` and has no impact on other columns types + .setPicosTimestampPrecision( + ArrowSerializationOptions.PicosTimestampPrecision + .TIMESTAMP_PRECISION_PICOS) + .build()) + .build()) + .build(), + MAX_STREAM_COUNT); + assertEquals( + MAX_STREAM_COUNT, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + + // that there are streams available in the session. An empty table may not have + // data available. If no sessions are available for an anonymous (cached) table, consider + // writing results of a query to a named table rather than consuming cached results + // directly. + assertThat(session.getStreamsCount()).isGreaterThan(0); + + // Set up a simple reader and start a read session. + try (SimpleRowReaderArrow reader = new SimpleRowReaderArrow(session.getArrowSchema())) { + // Use the first stream to perform reading. 
+ String streamName = session.getStreams(0).getName(); + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(streamName).build(); + + long rowCount = 0; + // Process each block of rows as they arrive and decode using our simple row reader. + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + Preconditions.checkState(response.hasArrowRecordBatch()); + reader.processRows( + response.getArrowRecordBatch(), + new SimpleRowReaderArrow.ArrowTimestampBatchConsumer( + Helper.EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT)); + rowCount += response.getRowCount(); + } + assertEquals(Helper.EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT.length, rowCount); + } + } + + // Tests that inputs for micros and picos can be read properly via Avro @Test - public void testSimpleReadAndResume() { + void timestamp_readAvro() throws IOException { String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource(projectName, DATASET, BQSTORAGE_TIMESTAMP_READ_TABLE); + List rows = Helper.readAllRows(readClient, parentProjectId, table, null); + List timestamps = + rows.stream().map(x -> (Long) x.get(TIMESTAMP_COLUMN_NAME)).collect(Collectors.toList()); + List timestampHigherPrecision = + rows.stream() + .map(x -> x.get(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME).toString()) + .collect(Collectors.toList()); + for (int i = 0; i < timestamps.size(); i++) { + assertEquals(Helper.EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT[i][0], timestamps.get(i)); + } + for (int i = 0; i < timestampHigherPrecision.size(); i++) { + assertEquals( + Helper.EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT[i][1], + timestampHigherPrecision.get(i)); + } + } + + @Test + void testSimpleReadAndResume() { + String table = + BigQueryResource.formatTableResource( /* projectId= */ "bigquery-public-data", /* datasetId= */ "samples", /* tableId= */ "shakespeare"); ReadSession session = - 
client.createReadSession( + readClient.createReadSession( /* parent= */ parentProjectId, /* readSession= */ ReadSession.newBuilder() .setTable(table) @@ -814,16 +963,14 @@ public void testSimpleReadAndResume() { .build(), /* maxStreamCount= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); - // We have to read some number of rows in order to be able to resume. More details: - - long rowCount = ReadStreamToOffset(session.getStreams(0), /* rowOffset= */ 34_846); + long rowCount = readStreamToOffset(session.getStreams(0), /* rowOffset= */ 34_846); ReadRowsRequest readRowsRequest = ReadRowsRequest.newBuilder() @@ -831,7 +978,7 @@ public void testSimpleReadAndResume() { .setOffset(rowCount) .build(); - ServerStream stream = client.readRowsCallable().call(readRowsRequest); + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); for (ReadRowsResponse response : stream) { rowCount += response.getRowCount(); @@ -839,13 +986,13 @@ public void testSimpleReadAndResume() { // Verifies that the number of rows skipped and read equals to the total number of rows in the // table. 
- assertEquals(164_656, rowCount); + assertEquals(SHAKESPEARE_SAMPLE_ROW_COUNT, rowCount); } @Test - public void testFilter() throws IOException { + void testFilter() throws IOException { String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ "bigquery-public-data", /* datasetId= */ "samples", /* tableId= */ "shakespeare"); @@ -865,14 +1012,14 @@ public void testFilter() throws IOException { .build()) .build(); - ReadSession session = client.createReadSession(request); + ReadSession session = readClient.createReadSession(request); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); ReadRowsRequest readRowsRequest = ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); @@ -882,29 +1029,31 @@ public void testFilter() throws IOException { long rowCount = 0; - ServerStream stream = client.readRowsCallable().call(readRowsRequest); + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); for (ReadRowsResponse response : stream) { rowCount += response.getRowCount(); reader.processRows( response.getAvroRows(), - new AvroRowConsumer() { - @Override - public void accept(GenericData.Record record) { - Long wordCount = (Long) record.get("word_count"); - assertWithMessage("Row not matching expectations: %s", record.toString()) - .that(wordCount) - .isGreaterThan(100L); - } - }); + (AvroRowConsumer) + record -> { + String rowAssertMessage = + String.format("Row not matching expectations: %s", record.toString()); + + Long wordCount = (Long) record.get("word_count"); + assertWithMessage(rowAssertMessage).that(wordCount).isGreaterThan(100L); + + Utf8 word = (Utf8) record.get("word"); + assertWithMessage(rowAssertMessage).that(word.length()).isGreaterThan(0); + }); } 
assertEquals(1_333, rowCount); } @Test - public void testColumnSelection() throws IOException { + void testColumnSelection() throws IOException { String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ "bigquery-public-data", /* datasetId= */ "samples", /* tableId= */ "shakespeare"); @@ -928,14 +1077,14 @@ public void testColumnSelection() throws IOException { .build()) .build(); - ReadSession session = client.createReadSession(request); + ReadSession session = readClient.createReadSession(request); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); ReadRowsRequest readRowsRequest = ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); @@ -945,45 +1094,43 @@ public void testColumnSelection() throws IOException { String actualSchemaMessage = String.format( "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
-    assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType());
-    assertEquals(actualSchemaMessage, "__root__", avroSchema.getName());
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
 
-    assertEquals(actualSchemaMessage, 2, avroSchema.getFields().size());
+    assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage);
     assertEquals(
-        actualSchemaMessage, Schema.Type.STRING, avroSchema.getField("word").schema().getType());
+        Schema.Type.STRING, avroSchema.getField("word").schema().getType(), actualSchemaMessage);
     assertEquals(
-        actualSchemaMessage, Schema.Type.LONG,
-        avroSchema.getField("word_count").schema().getType());
+        Schema.Type.LONG,
+        avroSchema.getField("word_count").schema().getType(),
+        actualSchemaMessage);
 
     SimpleRowReaderAvro reader = new SimpleRowReaderAvro(avroSchema);
 
     long rowCount = 0;
-    ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+    ServerStream<ReadRowsResponse> stream = readClient.readRowsCallable().call(readRowsRequest);
     for (ReadRowsResponse response : stream) {
       rowCount += response.getRowCount();
       reader.processRows(
           response.getAvroRows(),
-          new AvroRowConsumer() {
-            @Override
-            public void accept(GenericData.Record record) {
-              String rowAssertMessage =
-                  String.format("Row not matching expectations: %s", record.toString());
-
-              Long wordCount = (Long) record.get("word_count");
-              assertWithMessage(rowAssertMessage).that(wordCount).isGreaterThan(100L);
-
-              Utf8 word = (Utf8) record.get("word");
-              assertWithMessage(rowAssertMessage).that(word.length()).isGreaterThan(0);
-            }
-          });
+          (AvroRowConsumer)
+              record -> {
+                String rowAssertMessage =
+                    String.format("Row not matching expectations: %s", record.toString());
+
+                Long wordCount = (Long) record.get("word_count");
+                assertWithMessage(rowAssertMessage).that(wordCount).isGreaterThan(100L);
+
+                Utf8 word = (Utf8)
record.get("word"); + assertWithMessage(rowAssertMessage).that(word.length()).isGreaterThan(0); + }); } - assertEquals(1_333, rowCount); + assertEquals(SHAKESPEARE_SAMPELS_ROWS_MORE_THAN_100_WORDS, rowCount); } @Test - public void testReadAtSnapshot() throws InterruptedException, IOException { + void testReadAtSnapshot() throws InterruptedException, IOException { Field intFieldSchema = Field.newBuilder("col", LegacySQLTypeName.INTEGER) .setMode(Mode.REQUIRED) @@ -995,24 +1142,24 @@ public void testReadAtSnapshot() throws InterruptedException, IOException { TableId testTableId = TableId.of(/* dataset= */ DATASET, /* table= */ "test_read_snapshot"); bigquery.create(TableInfo.of(testTableId, StandardTableDefinition.of(tableSchema))); - testTableId.toString(); - Job firstJob = - RunQueryAppendJobAndExpectSuccess( + runQueryAppendJobAndExpectSuccess( /* destinationTableId= */ testTableId, /* query= */ "SELECT 1 AS col"); Job secondJob = - RunQueryAppendJobAndExpectSuccess( + runQueryAppendJobAndExpectSuccess( /* destinationTableId= */ testTableId, /* query= */ "SELECT 2 AS col"); String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ projectName, /* datasetId= */ DATASET, /* tableId= */ testTableId.getTable()); final List rowsAfterFirstSnapshot = new ArrayList<>(); - ProcessRowsAtSnapshot( + Helper.processRowsAtSnapshot( + readClient, + parentProjectId, /* table= */ table, /* snapshotInMillis= */ firstJob.getStatistics().getEndTime(), /* filter= */ null, @@ -1022,10 +1169,12 @@ public void accept(GenericData.Record record) { rowsAfterFirstSnapshot.add((Long) record.get("col")); } }); - assertEquals(Arrays.asList(1L), rowsAfterFirstSnapshot); + assertEquals(Collections.singletonList(1L), rowsAfterFirstSnapshot); final List rowsAfterSecondSnapshot = new ArrayList<>(); - ProcessRowsAtSnapshot( + Helper.processRowsAtSnapshot( + readClient, + parentProjectId, /* table= */ table, /* snapshotInMillis= */ 
secondJob.getStatistics().getEndTime(), /* filter= */ null, @@ -1040,8 +1189,9 @@ public void accept(GenericData.Record record) { } @Test - public void testColumnPartitionedTableByDateField() throws InterruptedException, IOException { - String partitionedTableName = "test_column_partition_table_by_date"; + void testColumnPartitionedTableByDateField() throws InterruptedException, IOException { + String partitionedTableName = + "test_column_partition_table_by_date" + UUID.randomUUID().toString().substring(0, 8); String createTableStatement = String.format( " CREATE TABLE %s.%s (num_field INT64, date_field DATE) " @@ -1057,26 +1207,31 @@ public void testColumnPartitionedTableByDateField() throws InterruptedException, + " SELECT 3, CAST(\"2019-01-03\" AS DATE)", DATASET, partitionedTableName); - RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); + runQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ projectName, /* datasetId= */ DATASET, /* tableId= */ partitionedTableName); - List unfilteredRows = ReadAllRows(/* table= */ table, /* filter= */ null); - assertEquals("Actual rows read: " + unfilteredRows.toString(), 3, unfilteredRows.size()); + List unfilteredRows = + Helper.readAllRows(readClient, parentProjectId, /* table= */ table, /* filter= */ null); + assertEquals(3, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString()); List partitionFilteredRows = - ReadAllRows(/* table= */ table, /* filter= */ "date_field = CAST(\"2019-01-02\" AS DATE)"); + Helper.readAllRows( + readClient, + parentProjectId, + /* table= */ table, + /* filter= */ "date_field = CAST(\"2019-01-02\" AS DATE)"); assertEquals( - "Actual rows read: " + partitionFilteredRows.toString(), 1, partitionFilteredRows.size()); + 1, partitionFilteredRows.size(), "Actual rows read: " + 
partitionFilteredRows.toString()); assertEquals(2L, partitionFilteredRows.get(0).get("num_field")); } @Test - public void testIngestionTimePartitionedTable() throws InterruptedException, IOException { + void testIngestionTimePartitionedTable() throws InterruptedException, IOException { Field intFieldSchema = Field.newBuilder("num_field", LegacySQLTypeName.INTEGER) .setMode(Mode.REQUIRED) @@ -1096,36 +1251,41 @@ public void testIngestionTimePartitionedTable() throws InterruptedException, IOE .build())); // Simulate ingestion for 2019-01-01. - RunQueryAppendJobAndExpectSuccess( + runQueryAppendJobAndExpectSuccess( /* destinationTableId= */ TableId.of( /* dataset= */ DATASET, /* table= */ testTableId.getTable() + "$20190101"), /* query= */ "SELECT 1 AS num_field"); // Simulate ingestion for 2019-01-02. - RunQueryAppendJobAndExpectSuccess( + runQueryAppendJobAndExpectSuccess( /* destinationTableId= */ TableId.of( /* dataset= */ DATASET, /* table= */ testTableId.getTable() + "$20190102"), /* query= */ "SELECT 2 AS num_field"); String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ projectName, /* datasetId= */ testTableId.getDataset(), /* tableId= */ testTableId.getTable()); - List unfilteredRows = ReadAllRows(/* table= */ table, /* filter= */ null); - assertEquals("Actual rows read: " + unfilteredRows.toString(), 2, unfilteredRows.size()); + List unfilteredRows = + Helper.readAllRows(readClient, parentProjectId, /* table= */ table, /* filter= */ null); + assertEquals(2, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString()); List partitionFilteredRows = - ReadAllRows(/* table= */ table, /* filter= */ "_PARTITIONDATE > \"2019-01-01\""); + Helper.readAllRows( + readClient, + parentProjectId, + /* table= */ table, + /* filter= */ "_PARTITIONDATE > \"2019-01-01\""); assertEquals( - "Actual rows read: " + partitionFilteredRows.toString(), 1, partitionFilteredRows.size()); + 1, 
partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString());
     assertEquals(2L, partitionFilteredRows.get(0).get("num_field"));
   }
 
   @Test
-  public void testBasicSqlTypes() throws InterruptedException, IOException {
-    String tableName = "test_basic_sql_types";
+  void testBasicSqlTypes() throws InterruptedException, IOException {
+    String tableName = "test_basic_sql_types" + UUID.randomUUID().toString().substring(0, 8);
     String createTableStatement =
         String.format(
             " CREATE TABLE %s.%s "
@@ -1148,14 +1308,15 @@ public void testBasicSqlTypes() throws InterruptedException, IOException {
             + " b\"абвгд\"",
             DATASET, tableName);
 
-    RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
+    runQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
 
     String table =
-        BigQueryResource.FormatTableResource(
+        BigQueryResource.formatTableResource(
             /* projectId= */ projectName, /* datasetId= */ DATASET, /* tableId= */ tableName);
 
-    List<GenericData.Record> rows = ReadAllRows(/* table= */ table, /* filter= */ null);
-    assertEquals("Actual rows read: " + rows.toString(), 1, rows.size());
+    List<GenericData.Record> rows =
+        Helper.readAllRows(readClient, parentProjectId, /* table= */ table, /* filter= */ null);
+    assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
 
     GenericData.Record record = rows.get(0);
     Schema avroSchema = record.getSchema();
@@ -1165,22 +1326,22 @@ public void testBasicSqlTypes() throws InterruptedException, IOException {
             "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
     String rowAssertMessage =
         String.format("Row not matching expectations: %s", record.toString());
-    assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType());
-    assertEquals(actualSchemaMessage, "__root__", avroSchema.getName());
-    assertEquals(actualSchemaMessage, 6, avroSchema.getFields().size());
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+    assertEquals(6, avroSchema.getFields().size(), actualSchemaMessage);
 
     assertEquals(
-        actualSchemaMessage, Schema.Type.LONG, avroSchema.getField("int_field").schema().getType());
-    assertEquals(rowAssertMessage, 17L, (long) record.get("int_field"));
+        Schema.Type.LONG, avroSchema.getField("int_field").schema().getType(), actualSchemaMessage);
+    assertEquals(17L, (long) record.get("int_field"), rowAssertMessage);
 
     assertEquals(
-        actualSchemaMessage, Schema.Type.BYTES,
-        avroSchema.getField("num_field").schema().getType());
+        Schema.Type.BYTES,
+        avroSchema.getField("num_field").schema().getType(),
+        actualSchemaMessage);
     assertEquals(
-        actualSchemaMessage, LogicalTypes.decimal(/* precision= */ 38, /* scale= */ 9),
-        avroSchema.getField("num_field").schema().getLogicalType());
+        LogicalTypes.decimal(/* precision= */ 38, /* scale= */ 9),
+        avroSchema.getField("num_field").schema().getLogicalType(),
+        actualSchemaMessage);
     BigDecimal actual_num_field =
         new Conversions.DecimalConversion()
             .fromBytes(
@@ -1188,45 +1349,42 @@ public void testBasicSqlTypes() throws InterruptedException, IOException {
                 avroSchema,
                 avroSchema.getField("num_field").schema().getLogicalType());
     assertEquals(
-        rowAssertMessage, BigDecimal.valueOf(/* unscaledVal= */ 1_234_560_000_000L, /* scale= */ 9),
-        actual_num_field);
+        BigDecimal.valueOf(/* unscaledVal= */ 1_234_560_000_000L, /* scale= */ 9),
+        actual_num_field,
+        rowAssertMessage);
 
     assertEquals(
-        actualSchemaMessage, Schema.Type.DOUBLE,
-        avroSchema.getField("float_field").schema().getType());
-    assertEquals(
-        rowAssertMessage,
-        /* expected= */ 6.547678d,
-        /* actual=
*/ (double) record.get("float_field"),
-        /* delta= */ 0.0001);
+        Schema.Type.DOUBLE,
+        avroSchema.getField("float_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(6.547678d, (double) record.get("float_field"), 0.0001, rowAssertMessage);
 
     assertEquals(
-        actualSchemaMessage, Schema.Type.BOOLEAN,
-        avroSchema.getField("bool_field").schema().getType());
-    assertEquals(rowAssertMessage, true, record.get("bool_field"));
+        Schema.Type.BOOLEAN,
+        avroSchema.getField("bool_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(true, record.get("bool_field"), rowAssertMessage);
 
     assertEquals(
-        actualSchemaMessage, Schema.Type.STRING,
-        avroSchema.getField("str_field").schema().getType());
-    assertEquals(rowAssertMessage, new Utf8("String field value"), record.get("str_field"));
+        Schema.Type.STRING,
+        avroSchema.getField("str_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(new Utf8("String field value"), record.get("str_field"), rowAssertMessage);
 
     assertEquals(
-        actualSchemaMessage, Schema.Type.BYTES,
-        avroSchema.getField("bytes_field").schema().getType());
+        Schema.Type.BYTES,
+        avroSchema.getField("bytes_field").schema().getType(),
+        actualSchemaMessage);
     assertArrayEquals(
-        rowAssertMessage, Utf8.getBytesFor("абвгд"),
-        ((ByteBuffer) (record.get("bytes_field"))).array());
+        Utf8.getBytesFor("абвгд"),
+        ((ByteBuffer) (record.get("bytes_field"))).array(),
+        rowAssertMessage);
   }
 
   @Test
-  public void testDateAndTimeSqlTypes() throws InterruptedException, IOException {
-    String tableName = "test_date_and_time_sql_types";
+  void testDateAndTimeSqlTypes() throws InterruptedException, IOException {
+    String tableName =
+        "test_date_and_time_sql_types" + UUID.randomUUID().toString().substring(0, 8);
     String createTableStatement =
         String.format(
             " CREATE TABLE %s.%s "
@@ -1245,14 +1403,15 @@ public void testDateAndTimeSqlTypes() throws InterruptedException, IOException {
             + " CAST(\"2019-04-30 19:24:19.123456 UTC\" AS TIMESTAMP)",
             DATASET, tableName);
 
-    RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
+
runQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
 
     String table =
-        BigQueryResource.FormatTableResource(
+        BigQueryResource.formatTableResource(
            /* projectId= */ projectName, /* datasetId= */ DATASET, /* tableId= */ tableName);
 
-    List<GenericData.Record> rows = ReadAllRows(/* table= */ table, /* filter= */ null);
-    assertEquals("Actual rows read: " + rows.toString(), 1, rows.size());
+    List<GenericData.Record> rows =
+        Helper.readAllRows(readClient, parentProjectId, /* table= */ table, /* filter= */ null);
+    assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
 
     GenericData.Record record = rows.get(0);
     Schema avroSchema = record.getSchema();
@@ -1262,56 +1421,54 @@ public void testDateAndTimeSqlTypes() throws InterruptedException, IOException {
             "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
     String rowAssertMessage =
         String.format("Row not matching expectations: %s", record.toString());
-    assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType());
-    assertEquals(actualSchemaMessage, "__root__", avroSchema.getName());
-    assertEquals(actualSchemaMessage, 4, avroSchema.getFields().size());
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+    assertEquals(4, avroSchema.getFields().size(), actualSchemaMessage);
 
     assertEquals(
-        actualSchemaMessage, Schema.Type.INT, avroSchema.getField("date_field").schema().getType());
+        Schema.Type.INT, avroSchema.getField("date_field").schema().getType(), actualSchemaMessage);
     assertEquals(
-        actualSchemaMessage, LogicalTypes.date(),
-        avroSchema.getField("date_field").schema().getLogicalType());
+        LogicalTypes.date(),
+        avroSchema.getField("date_field").schema().getLogicalType(),
+        actualSchemaMessage);
     assertEquals(
-        rowAssertMessage, LocalDate.of(/* year= */ 2019, /* month= */ 5, /* dayOfMonth= */ 31),
-        LocalDate.ofEpochDay((int) record.get("date_field")));
+        LocalDate.of(/* year= */ 2019, /* month= */ 5, /* dayOfMonth= */ 31),
+        LocalDate.ofEpochDay((int)
record.get("date_field")),
+        rowAssertMessage);
 
     assertEquals(
-        actualSchemaMessage, Schema.Type.STRING,
-        avroSchema.getField("datetime_field").schema().getType());
+        Schema.Type.STRING,
+        avroSchema.getField("datetime_field").schema().getType(),
+        actualSchemaMessage);
     assertEquals(
-        actualSchemaMessage, "datetime",
-        avroSchema.getField("datetime_field").schema().getObjectProp("logicalType"));
+        "datetime",
+        avroSchema.getField("datetime_field").schema().getObjectProp("logicalType"),
+        actualSchemaMessage);
     assertEquals(
-        rowAssertMessage,
-        new Utf8("2019-04-30T21:47:59.999999"),
-        (Utf8) record.get("datetime_field"));
+        new Utf8("2019-04-30T21:47:59.999999"), record.get("datetime_field"), rowAssertMessage);
 
     assertEquals(
-        actualSchemaMessage, Schema.Type.LONG,
-        avroSchema.getField("time_field").schema().getType());
+        Schema.Type.LONG,
+        avroSchema.getField("time_field").schema().getType(),
+        actualSchemaMessage);
     assertEquals(
-        actualSchemaMessage, LogicalTypes.timeMicros(),
-        avroSchema.getField("time_field").schema().getLogicalType());
+        LogicalTypes.timeMicros(),
+        avroSchema.getField("time_field").schema().getLogicalType(),
+        actualSchemaMessage);
     assertEquals(
-        rowAssertMessage,
         LocalTime.of(
             /* hour= */ 21, /* minute= */ 47, /* second= */ 59, /* nanoOfSecond= */ 999_999_000),
-        LocalTime.ofNanoOfDay(1_000L * (long) record.get("time_field")));
+        LocalTime.ofNanoOfDay(1_000L * (long) record.get("time_field")),
+        rowAssertMessage);
 
     assertEquals(
-        actualSchemaMessage, Schema.Type.LONG,
-        avroSchema.getField("timestamp_field").schema().getType());
+        Schema.Type.LONG,
+        avroSchema.getField("timestamp_field").schema().getType(),
+        actualSchemaMessage);
     assertEquals(
-        actualSchemaMessage, LogicalTypes.timestampMicros(),
-        avroSchema.getField("timestamp_field").schema().getLogicalType());
+        LogicalTypes.timestampMicros(),
+        avroSchema.getField("timestamp_field").schema().getLogicalType(),
+        actualSchemaMessage);
     ZonedDateTime expected_timestamp =
         ZonedDateTime.parse(
                 "2019-04-30T19:24:19Z", DateTimeFormatter.ISO_INSTANT.withZone(ZoneOffset.UTC))
@@ -1323,12 +1480,12 @@ public void
testDateAndTimeSqlTypes() throws InterruptedException, IOException { /* epochSecond= */ actual_timestamp_micros / 1_000_000, (actual_timestamp_micros % 1_000_000) * 1_000), ZoneOffset.UTC); - assertEquals(rowAssertMessage, expected_timestamp, actual_timestamp); + assertEquals(expected_timestamp, actual_timestamp, rowAssertMessage); } @Test - public void testGeographySqlType() throws InterruptedException, IOException { - String tableName = "test_geography_sql_type"; + void testGeographySqlType() throws InterruptedException, IOException { + String tableName = "test_geography_sql_type" + UUID.randomUUID().toString().substring(0, 8); String createTableStatement = String.format( " CREATE TABLE %s.%s " @@ -1340,14 +1497,15 @@ public void testGeographySqlType() throws InterruptedException, IOException { + " SELECT ST_GEOGPOINT(1.1, 2.2)", DATASET, tableName); - RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); + runQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ projectName, /* datasetId= */ DATASET, /* tableId= */ tableName); - List rows = ReadAllRows(/* table= */ table, /* filter= */ null); - assertEquals("Actual rows read: " + rows.toString(), 1, rows.size()); + List rows = + Helper.readAllRows(readClient, parentProjectId, /* table= */ table, /* filter= */ null); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); GenericData.Record record = rows.get(0); Schema avroSchema = record.getSchema(); @@ -1357,40 +1515,42 @@ public void testGeographySqlType() throws InterruptedException, IOException { "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
     String rowAssertMessage =
         String.format("Row not matching expectations: %s", record.toString());
-    assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType());
-    assertEquals(actualSchemaMessage, "__root__", avroSchema.getName());
-    assertEquals(actualSchemaMessage, 1, avroSchema.getFields().size());
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+    assertEquals(1, avroSchema.getFields().size(), actualSchemaMessage);
 
     assertEquals(
-        actualSchemaMessage, Schema.Type.STRING,
-        avroSchema.getField("geo_field").schema().getType());
+        Schema.Type.STRING,
+        avroSchema.getField("geo_field").schema().getType(),
+        actualSchemaMessage);
     assertEquals(
-        actualSchemaMessage, "GEOGRAPHY",
-        avroSchema.getField("geo_field").schema().getObjectProp("sqlType"));
-    assertEquals(rowAssertMessage, new Utf8("POINT(1.1 2.2)"), (Utf8) record.get("geo_field"));
+        "GEOGRAPHY",
+        avroSchema.getField("geo_field").schema().getObjectProp("sqlType"),
+        actualSchemaMessage);
+    assertEquals(new Utf8("POINT(1.1 2.2)"), record.get("geo_field"), rowAssertMessage);
   }
 
   @Test
-  public void testStructAndArraySqlTypes() throws InterruptedException, IOException {
-    String tableName = "test_struct_and_array_sql_types";
+  void testStructAndArraySqlTypes() throws InterruptedException, IOException {
+    String tableName =
+        "test_struct_and_array_sql_types" + UUID.randomUUID().toString().substring(0, 8);
     String createTableStatement =
         String.format(
             " CREATE TABLE %s.%s (array_field ARRAY<INT64>, struct_field STRUCT<int_field INT64, str_field STRING> NOT NULL) OPTIONS( description=\"a"
                + " table with array and time column types\" ) AS SELECT [1, 2, 3], "
-                + " (10, 'abc')",
+                + " (10, 'abc')",
             DATASET, tableName);
 
-    RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
+    runQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
 
     String table
=
-        BigQueryResource.FormatTableResource(
+        BigQueryResource.formatTableResource(
            /* projectId= */ projectName, /* datasetId= */ DATASET, /* tableId= */ tableName);
 
-    List<GenericData.Record> rows = ReadAllRows(/* table= */ table, /* filter= */ null);
-    assertEquals("Actual rows read: " + rows.toString(), 1, rows.size());
+    List<GenericData.Record> rows =
+        Helper.readAllRows(readClient, parentProjectId, /* table= */ table, /* filter= */ null);
+    assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
 
     GenericData.Record record = rows.get(0);
     Schema avroSchema = record.getSchema();
@@ -1400,66 +1560,66 @@ public void testStructAndArraySqlTypes() throws InterruptedException, IOExceptio
             "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
     String rowAssertMessage =
         String.format("Row not matching expectations: %s", record.toString());
-    assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType());
-    assertEquals(actualSchemaMessage, "__root__", avroSchema.getName());
-    assertEquals(actualSchemaMessage, 2, avroSchema.getFields().size());
+    assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+    assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+    assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage);
 
     assertEquals(
-        actualSchemaMessage, Schema.Type.ARRAY,
-        avroSchema.getField("array_field").schema().getType());
+        Schema.Type.ARRAY,
+        avroSchema.getField("array_field").schema().getType(),
+        actualSchemaMessage);
     assertEquals(
-        actualSchemaMessage, Schema.Type.LONG,
-        avroSchema.getField("array_field").schema().getElementType().getType());
+        Schema.Type.LONG,
+        avroSchema.getField("array_field").schema().getElementType().getType(),
+        actualSchemaMessage);
     assertArrayEquals(
-        rowAssertMessage, new Long[] {1L, 2L, 3L},
-        ((GenericData.Array<Long>) record.get("array_field")).toArray(new Long[0]));
+        new Long[] {1L, 2L, 3L},
+        ((GenericData.Array<Long>) record.get("array_field")).toArray(new Long[0]),
+        rowAssertMessage);
 
     // Validate the STRUCT field and its members.
Schema structSchema = avroSchema.getField("struct_field").schema();
-    assertEquals(actualSchemaMessage, Schema.Type.RECORD, structSchema.getType());
+    assertEquals(Schema.Type.RECORD, structSchema.getType(), actualSchemaMessage);
     GenericData.Record structRecord = (GenericData.Record) record.get("struct_field");
     assertEquals(
-        actualSchemaMessage, Schema.Type.LONG,
-        structSchema.getField("int_field").schema().getType());
-    assertEquals(rowAssertMessage, 10L, (long) structRecord.get("int_field"));
+        Schema.Type.LONG,
+        structSchema.getField("int_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(10L, (long) structRecord.get("int_field"), rowAssertMessage);
     assertEquals(
-        actualSchemaMessage, Schema.Type.STRING,
-        structSchema.getField("str_field").schema().getType());
-    assertEquals(rowAssertMessage, new Utf8("abc"), structRecord.get("str_field"));
+        Schema.Type.STRING,
+        structSchema.getField("str_field").schema().getType(),
+        actualSchemaMessage);
+    assertEquals(new Utf8("abc"), structRecord.get("str_field"), rowAssertMessage);
   }
 
   @Test
-  public void testSimpleReadWithBackgroundExecutorProvider() throws IOException {
+  void testSimpleReadWithBackgroundExecutorProvider() throws IOException {
    BigQueryReadSettings bigQueryReadSettings =
        BigQueryReadSettings.newBuilder()
            .setBackgroundExecutorProvider(
                InstantiatingExecutorProvider.newBuilder().setExecutorThreadCount(14).build())
            .build();
    // Overriding the default client
-    client = BigQueryReadClient.create(bigQueryReadSettings);
+    readClient = BigQueryReadClient.create(bigQueryReadSettings);
    assertTrue(
-        client.getStub().getStubSettings().getBackgroundExecutorProvider()
+        readClient.getStub().getStubSettings().getBackgroundExecutorProvider()
            instanceof InstantiatingExecutorProvider);
    assertEquals(
        14,
        ((InstantiatingExecutorProvider)
-                client.getStub().getStubSettings().getBackgroundExecutorProvider())
+                readClient.getStub().getStubSettings().getBackgroundExecutorProvider())
            .getExecutorThreadCount());
 
     String table =
-
BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ "bigquery-public-data", /* datasetId= */ "samples", /* tableId= */ "shakespeare"); ReadSession session = - client.createReadSession( + readClient.createReadSession( /* parent= */ parentProjectId, /* readSession= */ ReadSession.newBuilder() .setTable(table) @@ -1467,104 +1627,104 @@ public void testSimpleReadWithBackgroundExecutorProvider() throws IOException { .build(), /* maxStreamCount= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); ReadRowsRequest readRowsRequest = ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); long rowCount = 0; - ServerStream stream = client.readRowsCallable().call(readRowsRequest); + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); for (ReadRowsResponse response : stream) { rowCount += response.getRowCount(); } - assertEquals(164_656, rowCount); + assertEquals(SHAKESPEARE_SAMPLE_ROW_COUNT, rowCount); } @Test - public void testUniverseDomainWithInvalidUniverseDomain() throws IOException { + void testUniverseDomainWithInvalidUniverseDomain() throws IOException { BigQueryReadSettings bigQueryReadSettings = BigQueryReadSettings.newBuilder() .setCredentialsProvider( - FixedCredentialsProvider.create(loadCredentials(FAKE_JSON_CRED_WITH_GOOGLE_DOMAIN))) + FixedCredentialsProvider.create( + Helper.loadCredentials(FAKE_JSON_CRED_WITH_GOOGLE_DOMAIN))) .setUniverseDomain("invalid.domain") .build(); BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings); String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ "bigquery-public-data", /* datasetId= */ "samples", /* tableId= */ "shakespeare"); - try { - 
localClient.createReadSession( - /* parent= */ parentProjectId, - /* readSession= */ ReadSession.newBuilder() - .setTable(table) - .setDataFormat(DataFormat.AVRO) - .build(), - /* maxStreamCount= */ 1); - fail("RPCs to invalid universe domain should fail"); - } catch (UnauthenticatedException e) { - assertThat( - (e.getMessage() - .contains("does not match the universe domain found in the credentials"))) - .isTrue(); - } + UnauthenticatedException e = + assertThrows( + UnauthenticatedException.class, + () -> + localClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1), + "RPCs to invalid universe domain should fail"); + assertThat( + (e.getMessage() + .contains("does not match the universe domain found in the credentials"))) + .isTrue(); localClient.close(); } @Test - public void testInvalidUniverseDomainWithMismatchCredentials() throws IOException { + void testInvalidUniverseDomainWithMismatchCredentials() throws IOException { BigQueryReadSettings bigQueryReadSettings = BigQueryReadSettings.newBuilder() .setCredentialsProvider( FixedCredentialsProvider.create( - loadCredentials(FAKE_JSON_CRED_WITH_INVALID_DOMAIN))) + Helper.loadCredentials(FAKE_JSON_CRED_WITH_INVALID_DOMAIN))) .setUniverseDomain("invalid.domain") .build(); BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings); String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ "bigquery-public-data", /* datasetId= */ "samples", /* tableId= */ "shakespeare"); - try { - ReadSession session = - localClient.createReadSession( - /* parent= */ parentProjectId, - /* readSession= */ ReadSession.newBuilder() - .setTable(table) - .setDataFormat(DataFormat.AVRO) - .build(), - /* maxStreamCount= */ 1); - fail("RPCs to invalid universe domain should fail"); - } catch (UnauthenticatedException e) { - 
assertThat( - (e.getMessage() - .contains("does not match the universe domain found in the credentials"))) - .isTrue(); - } + UnauthenticatedException e = + assertThrows( + UnauthenticatedException.class, + () -> + localClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1), + "RPCs to invalid universe domain should fail"); + assertTrue( + e.getMessage().contains("does not match the universe domain found in the credentials")); localClient.close(); } @Test - public void testUniverseDomainWithMatchingDomain() throws IOException { + void testUniverseDomainWithMatchingDomain() throws IOException { // Test a valid domain using the default credentials and Google default universe domain. BigQueryReadSettings bigQueryReadSettings = BigQueryReadSettings.newBuilder().setUniverseDomain("googleapis.com").build(); BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings); String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ "bigquery-public-data", /* datasetId= */ "samples", /* tableId= */ "shakespeare"); @@ -1582,17 +1742,17 @@ public void testUniverseDomainWithMatchingDomain() throws IOException { ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); long rowCount = 0; - ServerStream stream = client.readRowsCallable().call(readRowsRequest); + ServerStream stream = readClient.readRowsCallable().call(readRowsRequest); for (ReadRowsResponse response : stream) { rowCount += response.getRowCount(); } - assertEquals(164_656, rowCount); + assertEquals(SHAKESPEARE_SAMPLE_ROW_COUNT, rowCount); localClient.close(); } @Test - public void testSimpleReadWithOtelTracing() throws IOException { + void testSimpleReadWithOtelTracing() throws IOException { SdkTracerProvider tracerProvider = SdkTracerProvider.builder() 
.addSpanProcessor(SimpleSpanProcessor.create(new TestSpanExporter())) @@ -1608,7 +1768,7 @@ public void testSimpleReadWithOtelTracing() throws IOException { BigQueryReadClient otelClient = BigQueryReadClient.create(otelSettings); String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ "bigquery-public-data", /* datasetId= */ "samples", /* tableId= */ "shakespeare"); @@ -1641,10 +1801,10 @@ public void testSimpleReadWithOtelTracing() throws IOException { // createReadSession is the parent span of createReadSessionCallable assertEquals( + "com.google.cloud.bigquery.storage.v1.read.createReadSession", OTEL_SPAN_IDS_TO_NAMES.get( OTEL_PARENT_SPAN_IDS.get( - "com.google.cloud.bigquery.storage.v1.read.createReadSessionCallable")), - "com.google.cloud.bigquery.storage.v1.read.createReadSession"); + "com.google.cloud.bigquery.storage.v1.read.createReadSessionCallable"))); Map, Object> createReadSessionMap = OTEL_ATTRIBUTES.get("com.google.cloud.bigquery.storage.v1.read.createReadSession"); @@ -1653,21 +1813,21 @@ public void testSimpleReadWithOtelTracing() throws IOException { createReadSessionMap.get( AttributeKey.longKey("bq.storage.read_session.request.max_stream_count"))); assertEquals( + 1L, createReadSessionMap.get( - AttributeKey.longKey("bq.storage.read_session.request.max_stream_count")), - 1L); + AttributeKey.longKey("bq.storage.read_session.request.max_stream_count"))); } - public void testUniverseDomain() throws IOException { + void testUniverseDomain() throws IOException { // This test is not yet part presubmit integration test as it requires the apis-tpclp.goog // universe domain credentials. - // Test a valid read session in the universe domain gdutst. + // Test a valid domain using the default credentials and Google default universe domain. 
BigQueryReadSettings bigQueryReadSettings = BigQueryReadSettings.newBuilder().setUniverseDomain("apis-tpclp.goog").build(); BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings); String table = - BigQueryResource.FormatTableResource( + BigQueryResource.formatTableResource( /* projectId= */ "google-tpc-testing-environment:cloudsdk-test-project", /* datasetId= */ "tpc_demo_dataset", /* tableId= */ "new_table"); @@ -1702,17 +1862,15 @@ public void testUniverseDomain() throws IOException { * @param rowOffset * @return the number of requested rows to skip or the total rows read if stream had less rows. */ - private long ReadStreamToOffset(ReadStream readStream, long rowOffset) { - + private long readStreamToOffset(ReadStream readStream, long rowOffset) { ReadRowsRequest readRowsRequest = ReadRowsRequest.newBuilder().setReadStream(readStream.getName()).build(); long rowCount = 0; - ServerStream serverStream = client.readRowsCallable().call(readRowsRequest); - Iterator responseIterator = serverStream.iterator(); + ServerStream serverStream = + readClient.readRowsCallable().call(readRowsRequest); - while (responseIterator.hasNext()) { - ReadRowsResponse response = responseIterator.next(); + for (ReadRowsResponse response : serverStream) { rowCount += response.getRowCount(); if (rowCount >= rowOffset) { return rowOffset; @@ -1722,92 +1880,6 @@ private long ReadStreamToOffset(ReadStream readStream, long rowOffset) { return rowCount; } - /** - * Reads all the rows from the specified table. - * - *

For every row, the consumer is called for processing. - * - * @param table - * @param snapshotInMillis Optional. If specified, all rows up to timestamp will be returned. - * @param filter Optional. If specified, it will be used to restrict returned data. - * @param consumer that receives all Avro rows. - * @throws IOException - */ - private void ProcessRowsAtSnapshot( - String table, Long snapshotInMillis, String filter, AvroRowConsumer consumer) - throws IOException { - Preconditions.checkNotNull(table); - Preconditions.checkNotNull(consumer); - - CreateReadSessionRequest.Builder createSessionRequestBuilder = - CreateReadSessionRequest.newBuilder() - .setParent(parentProjectId) - .setMaxStreamCount(1) - .setReadSession( - ReadSession.newBuilder().setTable(table).setDataFormat(DataFormat.AVRO).build()); - - if (snapshotInMillis != null) { - Timestamp snapshotTimestamp = - Timestamp.newBuilder() - .setSeconds(snapshotInMillis / 1_000) - .setNanos((int) ((snapshotInMillis % 1000) * 1000000)) - .build(); - createSessionRequestBuilder - .getReadSessionBuilder() - .setTableModifiers( - TableModifiers.newBuilder().setSnapshotTime(snapshotTimestamp).build()); - } - - if (filter != null && !filter.isEmpty()) { - createSessionRequestBuilder - .getReadSessionBuilder() - .setReadOptions(TableReadOptions.newBuilder().setRowRestriction(filter).build()); - } - - ReadSession session = client.createReadSession(createSessionRequestBuilder.build()); - assertEquals( - String.format( - "Did not receive expected number of streams for table '%s' CreateReadSession" - + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); - - ReadRowsRequest readRowsRequest = - ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); - - SimpleRowReaderAvro reader = - new SimpleRowReaderAvro(new Schema.Parser().parse(session.getAvroSchema().getSchema())); - - ServerStream stream = client.readRowsCallable().call(readRowsRequest); - for 
(ReadRowsResponse response : stream) { - reader.processRows(response.getAvroRows(), consumer); - } - } - - /** - * Reads all the rows from the specified table and returns a list as generic Avro records. - * - * @param table - * @param filter Optional. If specified, it will be used to restrict returned data. - * @return - */ - List ReadAllRows(String table, String filter) throws IOException { - final List rows = new ArrayList<>(); - ProcessRowsAtSnapshot( - /* table= */ table, - /* snapshotInMillis= */ null, - /* filter= */ filter, - new AvroRowConsumer() { - @Override - public void accept(GenericData.Record record) { - // clone the record since that reference will be reused by the reader. - rows.add(new GenericRecordBuilder(record).build()); - } - }); - return rows; - } - /** * Runs a query job with WRITE_APPEND disposition to the destination table and returns the * successfully completed job. @@ -1817,9 +1889,9 @@ public void accept(GenericData.Record record) { * @return * @throws InterruptedException */ - private Job RunQueryAppendJobAndExpectSuccess(TableId destinationTableId, String query) + private Job runQueryAppendJobAndExpectSuccess(TableId destinationTableId, String query) throws InterruptedException { - return RunQueryJobAndExpectSuccess( + return runQueryJobAndExpectSuccess( QueryJobConfiguration.newBuilder(query) .setDestinationTable(destinationTableId) .setUseQueryCache(false) @@ -1835,7 +1907,7 @@ private Job RunQueryAppendJobAndExpectSuccess(TableId destinationTableId, String * @return * @throws InterruptedException */ - private Job RunQueryJobAndExpectSuccess(QueryJobConfiguration configuration) + private Job runQueryJobAndExpectSuccess(QueryJobConfiguration configuration) throws InterruptedException { Job job = bigquery.create(JobInfo.of(configuration)); Job completedJob = @@ -1845,40 +1917,10 @@ private Job RunQueryJobAndExpectSuccess(QueryJobConfiguration configuration) assertNotNull(completedJob); assertNull( + /* object= */ 
completedJob.getStatus().getError(), /* message= */ "Received a job status that is not a success: " - + completedJob.getStatus().toString(), - /* object= */ completedJob.getStatus().getError()); + + completedJob.getStatus().toString()); return completedJob; } - - static ServiceAccountCredentials loadCredentials(String credentialFile) { - try { - InputStream keyStream = new ByteArrayInputStream(credentialFile.getBytes()); - return ServiceAccountCredentials.fromStream(keyStream); - } catch (IOException e) { - fail("Couldn't create fake JSON credentials."); - } - return null; - } - - static class AppendCompleteCallback implements ApiFutureCallback { - private static final Object lock = new Object(); - private static int batchCount = 0; - - public void onSuccess(AppendRowsResponse response) { - synchronized (lock) { - if (response.hasError()) { - System.out.format("Error: %s\n", response.getError()); - } else { - ++batchCount; - System.out.format("Wrote batch %d\n", batchCount); - } - } - } - - public void onFailure(Throwable throwable) { - System.out.format("Error: %s\n", throwable.toString()); - } - } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageWriteClientTest.java similarity index 74% rename from google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java rename to google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageWriteClientTest.java index 756dfcc793..b7e46be405 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageWriteClientTest.java @@ -16,14 +16,21 @@ package 
com.google.cloud.bigquery.storage.v1.it; +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT; +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_COLUMN_NAME; +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME; import static com.google.common.truth.Truth.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import com.google.api.client.util.Sleeper; import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; import com.google.api.gax.retrying.RetrySettings; import com.google.api.gax.rpc.FixedHeaderProvider; import com.google.api.gax.rpc.HeaderProvider; @@ -40,13 +47,20 @@ import com.google.cloud.bigquery.storage.v1.Exceptions.OffsetOutOfRange; import com.google.cloud.bigquery.storage.v1.Exceptions.SchemaMismatchedException; import com.google.cloud.bigquery.storage.v1.Exceptions.StreamFinalizedException; +import com.google.cloud.bigquery.storage.v1.it.util.BigQueryResource; +import com.google.cloud.bigquery.storage.v1.it.util.Helper; import com.google.cloud.bigquery.testing.RemoteBigQueryHelper; import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; import com.google.protobuf.ByteString; +import com.google.protobuf.DescriptorProtos; import com.google.protobuf.DescriptorProtos.DescriptorProto; import 
com.google.protobuf.DescriptorProtos.FieldDescriptorProto; import com.google.protobuf.Descriptors; import com.google.protobuf.Descriptors.DescriptorValidationException; +import com.google.protobuf.DynamicMessage; +import com.google.protobuf.Int64Value; +import com.google.protobuf.Message; import io.grpc.Status; import io.grpc.Status.Code; import java.io.ByteArrayOutputStream; @@ -64,6 +78,7 @@ import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import java.util.logging.Logger; +import java.util.stream.Collectors; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.memory.RootAllocator; import org.apache.arrow.vector.*; @@ -74,17 +89,20 @@ import org.apache.arrow.vector.ipc.message.MessageSerializer; import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.FieldType; +import org.apache.avro.generic.GenericData; import org.json.JSONArray; import org.json.JSONObject; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; /** Integration tests for BigQuery Write API. 
*/ -public class ITBigQueryWriteManualClientTest { +@Execution(ExecutionMode.SAME_THREAD) +class ITBigQueryStorageWriteClientTest { private static final Logger LOG = - Logger.getLogger(ITBigQueryWriteManualClientTest.class.getName()); + Logger.getLogger(ITBigQueryStorageWriteClientTest.class.getName()); private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); private static final String DATASET_EU = RemoteBigQueryHelper.generateDatasetName(); private static final String TABLE = "testtable"; @@ -94,7 +112,9 @@ public class ITBigQueryWriteManualClientTest { private static final String DESCRIPTION = "BigQuery Write Java manual client test dataset"; - private static BigQueryWriteClient client; + private static BigQueryReadClient readClient; + private static BigQueryWriteClient writeClient; + private static String parentProjectId; private static TableInfo tableInfo; private static TableInfo tableInfo2; @@ -109,7 +129,48 @@ public class ITBigQueryWriteManualClientTest { private static final BufferAllocator allocator = new RootAllocator(); - public class StringWithSecondsNanos { + // Arrow is a bit special in that timestamps are limited to nanoseconds precision. + // The data will be padded to fit into the higher precision columns. + private static final Object[][] INPUT_ARROW_WRITE_TIMESTAMPS = + new Object[][] { + {1735734896123456L /* 2025-01-01T12:34:56.123456Z */, 1735734896123456789L}, + {1580646896123456L /* 2020-02-02T12:34:56.123456Z */, 1580646896123456789L}, + {636467696123456L /* 1990-03-03T12:34:56.123456Z */, 636467696123456789L}, + {165846896123456L /* 1975-04-04T12:34:56.123456Z */, 165846896123456789L} + }; + + // Arrow's higher precision column is padded with extra 0's if configured to return + // ISO as output for any picosecond enabled column. 
+ private static final Object[][] EXPECTED_ARROW_WRITE_TIMESTAMPS_ISO_OUTPUT = + new Object[][] { + {1735734896123456L /* 2025-01-01T12:34:56.123456Z */, "2025-01-01T12:34:56.123456789000Z"}, + {1580646896123456L /* 2020-02-02T12:34:56.123456Z */, "2020-02-02T12:34:56.123456789000Z"}, + {636467696123456L /* 1990-03-03T12:34:56.123456Z */, "1990-03-03T12:34:56.123456789000Z"}, + {165846896123456L /* 1975-04-04T12:34:56.123456Z */, "1975-04-04T12:34:56.123456789000Z"} + }; + + // Special case where users can use the Write API with Protobuf messages + // The format is two fields: 1. Seconds from epoch and 2. Subsecond fractional (millis, micros, + // nano, or pico). This test case is using picos sub-second fractional + private static final Long[][] INPUT_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS = + new Long[][] { + {1735734896L, 123456789123L}, /* 2025-01-01T12:34:56.123456789123Z */ + {1580646896L, 123456789123L}, /* 2020-02-02T12:34:56.123456789123Z */ + {636467696L, 123456789123L}, /* 1990-03-03T12:34:56.123456789123Z */ + {165846896L, 123456789123L} /* 1975-04-04T12:34:56.123456789123Z */ + }; + + // Expected ISO8601 output when using proto descriptors to write to BQ with pico precision + private static final String[] + EXPECTED_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS_HIGH_PRECISION_ISO_OUTPUT = + new String[] { + "2025-01-01T12:34:56.123456789123Z", + "2020-02-02T12:34:56.123456789123Z", + "1990-03-03T12:34:56.123456789123Z", + "1975-04-04T12:34:56.123456789123Z" + }; + + static class StringWithSecondsNanos { public String foo; public long seconds; public int nanos; @@ -124,11 +185,14 @@ public StringWithSecondsNanos(String fooParam, long secondsParam, int nanosParam private static final HeaderProvider USER_AGENT_HEADER_PROVIDER = FixedHeaderProvider.create("User-Agent", "my_product_name/1.0 (GPN:Samples;test)"); - @BeforeClass - public static void beforeClass() throws IOException { + @BeforeAll + static void beforeAll() throws IOException { + readClient = 
BigQueryReadClient.create(); + BigQueryWriteSettings settings = BigQueryWriteSettings.newBuilder().setHeaderProvider(USER_AGENT_HEADER_PROVIDER).build(); - client = BigQueryWriteClient.create(settings); + writeClient = BigQueryWriteClient.create(settings); + parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId()); RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); bigquery = bigqueryHelper.getOptions().getService(); @@ -215,19 +279,25 @@ public static void beforeClass() throws IOException { bigquery.create(tableInfoEU); } - @AfterClass - public static void afterClass() { - if (client != null) { - client.close(); + @AfterAll + static void afterAll() throws InterruptedException { + if (writeClient != null) { + writeClient.close(); + writeClient.awaitTermination(10, TimeUnit.SECONDS); + } + + if (readClient != null) { + readClient.close(); + readClient.awaitTermination(10, TimeUnit.SECONDS); } if (bigquery != null) { RemoteBigQueryHelper.forceDelete(bigquery, DATASET); - LOG.info("Deleted test dataset: " + DATASET); + RemoteBigQueryHelper.forceDelete(bigquery, DATASET_EU); } } - ProtoRows CreateProtoRows(String[] messages) { + ProtoRows createProtoRows(String[] messages) { ProtoRows.Builder rows = ProtoRows.newBuilder(); for (String message : messages) { FooType foo = FooType.newBuilder().setFoo(message).build(); @@ -236,7 +306,7 @@ ProtoRows CreateProtoRows(String[] messages) { return rows.build(); } - ProtoSchema CreateProtoSchemaWithColField() { + ProtoSchema createProtoSchemaWithColField() { return ProtoSchema.newBuilder() .setProtoDescriptor( DescriptorProto.newBuilder() @@ -251,7 +321,7 @@ ProtoSchema CreateProtoSchemaWithColField() { .build(); } - ProtoRows CreateProtoOptionalRows(String[] messages) { + ProtoRows createProtoOptionalRows(String[] messages) { ProtoRows.Builder rows = ProtoRows.newBuilder(); for (String message : messages) { FooOptionalType foo = FooOptionalType.newBuilder().setFoo(message).build(); 
@@ -260,7 +330,7 @@ ProtoRows CreateProtoOptionalRows(String[] messages) { return rows.build(); } - ProtoRows CreateProtoRowsMultipleColumns(String[] messages) { + ProtoRows createProtoRowsMultipleColumns(String[] messages) { ProtoRows.Builder rows = ProtoRows.newBuilder(); for (String message : messages) { UpdatedFooType foo = UpdatedFooType.newBuilder().setFoo(message).setBar(message).build(); @@ -269,7 +339,7 @@ ProtoRows CreateProtoRowsMultipleColumns(String[] messages) { return rows.build(); } - ProtoRows CreateProtoRowsComplex(String[] messages) { + ProtoRows createProtoRowsComplex(String[] messages) { ProtoRows.Builder rows = ProtoRows.newBuilder(); for (String message : messages) { ComplicateType foo = @@ -281,7 +351,7 @@ ProtoRows CreateProtoRowsComplex(String[] messages) { return rows.build(); } - ProtoRows CreateProtoRowsMixed(StringWithSecondsNanos[] messages) { + ProtoRows createProtoRowsMixed(StringWithSecondsNanos[] messages) { ProtoRows.Builder rows = ProtoRows.newBuilder(); for (StringWithSecondsNanos message : messages) { FooTimestampType datum = @@ -299,65 +369,67 @@ ProtoRows CreateProtoRowsMixed(StringWithSecondsNanos[] messages) { } @Test - public void testBatchWriteWithCommittedStreamEU() + void testBatchWriteWithCommittedStreamEU() throws IOException, InterruptedException, ExecutionException { WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(tableIdEU) .setWriteStream( WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) .build()); - StreamWriter streamWriter = + ApiFuture response1; + ApiFuture response2; + try (StreamWriter streamWriter = StreamWriter.newBuilder(writeStream.getName()) .setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor())) - .build(); - LOG.info("Sending one message"); + .build()) { + LOG.info("Sending one message"); - ApiFuture response = - streamWriter.append(CreateProtoRows(new String[] {"aaa"}), 0); - 
assertEquals(0, response.get().getAppendResult().getOffset().getValue()); + ApiFuture response = + streamWriter.append(createProtoRows(new String[] {"aaa"}), 0); + assertEquals(0, response.get().getAppendResult().getOffset().getValue()); - LOG.info("Sending two more messages"); - ApiFuture response1 = - streamWriter.append(CreateProtoRows(new String[] {"bbb", "ccc"}), 1); - ApiFuture response2 = - streamWriter.append(CreateProtoRows(new String[] {"ddd"}), 3); + LOG.info("Sending two more messages"); + response1 = streamWriter.append(createProtoRows(new String[] {"bbb", "ccc"}), 1); + response2 = streamWriter.append(createProtoRows(new String[] {"ddd"}), 3); + } assertEquals(1, response1.get().getAppendResult().getOffset().getValue()); assertEquals(3, response2.get().getAppendResult().getOffset().getValue()); } @Test - public void testProto3OptionalBatchWriteWithCommittedStream() + void testProto3OptionalBatchWriteWithCommittedStream() throws IOException, InterruptedException, ExecutionException { WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(tableId) .setWriteStream( WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) .build()); - StreamWriter streamWriter = + ApiFuture response1; + ApiFuture response2; + try (StreamWriter streamWriter = StreamWriter.newBuilder(writeStream.getName()) .setWriterSchema(ProtoSchemaConverter.convert(FooOptionalType.getDescriptor())) - .build(); - LOG.info("Sending one message"); + .build()) { + LOG.info("Sending one message"); - ApiFuture response = - streamWriter.append(CreateProtoOptionalRows(new String[] {"aaa"}), 0); - assertEquals(0, response.get().getAppendResult().getOffset().getValue()); + ApiFuture response = + streamWriter.append(createProtoOptionalRows(new String[] {"aaa"}), 0); + assertEquals(0, response.get().getAppendResult().getOffset().getValue()); - LOG.info("Sending two more messages"); - ApiFuture response1 = - 
streamWriter.append(CreateProtoOptionalRows(new String[] {"bbb", "ccc"}), 1); - ApiFuture response2 = - streamWriter.append(CreateProtoOptionalRows(new String[] {""}), 3); + LOG.info("Sending two more messages"); + response1 = streamWriter.append(createProtoOptionalRows(new String[] {"bbb", "ccc"}), 1); + response2 = streamWriter.append(createProtoOptionalRows(new String[] {""}), 3); + } assertEquals(1, response1.get().getAppendResult().getOffset().getValue()); assertEquals(3, response2.get().getAppendResult().getOffset().getValue()); } @Test - public void testJsonStreamWriterCommittedStream() + void testJsonStreamWriterCommittedStream() throws IOException, InterruptedException, ExecutionException, @@ -382,7 +454,7 @@ public void testJsonStreamWriterCommittedStream() bigquery.create(tableInfo); TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(parent.toString()) .setWriteStream( @@ -443,12 +515,12 @@ public void testJsonStreamWriterCommittedStream() assertEquals("bbb", iter.next().get(0).getStringValue()); assertEquals("ccc", iter.next().get(0).getStringValue()); assertEquals("ddd", iter.next().get(0).getStringValue()); - assertEquals(false, iter.hasNext()); + assertFalse(iter.hasNext()); } } @Test - public void testRowErrors() + void testRowErrors() throws IOException, InterruptedException, ExecutionException, @@ -469,60 +541,59 @@ public void testRowErrors() .build(); bigquery.create(tableInfo); TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); - StreamWriter streamWriter = + ApiFuture futureResponse1; + try (StreamWriter streamWriter = StreamWriter.newBuilder(parent.toString() + "/_default") .setWriterSchema(ProtoSchemaConverter.convert(FooTimestampType.getDescriptor())) - .build(); + .build()) { - LOG.info("Sending three messages"); - 
StringWithSecondsNanos[] myBadList = { - new StringWithSecondsNanos("aaabbbcccddd", 1663821424, 0), - new StringWithSecondsNanos("bbb", Long.MIN_VALUE, 0), - new StringWithSecondsNanos("cccdddeeefffggg", 1663621424, 0) - }; - ApiFuture futureResponse = - streamWriter.append(CreateProtoRowsMixed(myBadList), -1); - AppendRowsResponse actualResponse = null; - try { - actualResponse = futureResponse.get(); - } catch (Throwable t) { - assertTrue(t instanceof ExecutionException); - t = t.getCause(); - assertTrue(t instanceof AppendSerializationError); - AppendSerializationError e = (AppendSerializationError) t; - LOG.info("Found row errors on stream: " + e.getStreamName()); - assertEquals( - "Field foo: STRING(10) has maximum length 10 but got a value with length 12 on field" - + " foo.", - e.getRowIndexToErrorMessage().get(0)); - assertEquals( - "Timestamp field value is out of range: -9223372036854775808 on field bar.", - e.getRowIndexToErrorMessage().get(1)); - assertEquals( - "Field foo: STRING(10) has maximum length 10 but got a value with length 15 on field" - + " foo.", - e.getRowIndexToErrorMessage().get(2)); - for (Map.Entry entry : e.getRowIndexToErrorMessage().entrySet()) { - LOG.info("Bad row index: " + entry.getKey() + ", has problem: " + entry.getValue()); + LOG.info("Sending three messages"); + StringWithSecondsNanos[] myBadList = { + new StringWithSecondsNanos("aaabbbcccddd", 1663821424, 0), + new StringWithSecondsNanos("bbb", Long.MIN_VALUE, 0), + new StringWithSecondsNanos("cccdddeeefffggg", 1663621424, 0) + }; + ApiFuture futureResponse = + streamWriter.append(createProtoRowsMixed(myBadList), -1); + AppendRowsResponse actualResponse = null; + try { + actualResponse = futureResponse.get(); + } catch (Throwable t) { + assertTrue(t instanceof ExecutionException); + t = t.getCause(); + assertTrue(t instanceof AppendSerializationError); + AppendSerializationError e = (AppendSerializationError) t; + LOG.info("Found row errors on stream: " + 
e.getStreamName()); + assertEquals( + "Field foo: STRING(10) has maximum length 10 but got a value with length 12 on field" + + " foo.", + e.getRowIndexToErrorMessage().get(0)); + assertEquals( + "Timestamp field value is out of range: -9223372036854775808 on field bar.", + e.getRowIndexToErrorMessage().get(1)); + assertEquals( + "Field foo: STRING(10) has maximum length 10 but got a value with length 15 on field" + + " foo.", + e.getRowIndexToErrorMessage().get(2)); + for (Map.Entry entry : e.getRowIndexToErrorMessage().entrySet()) { + LOG.info("Bad row index: " + entry.getKey() + ", has problem: " + entry.getValue()); + } } + assertNull(actualResponse); + + LOG.info("Resending with three good messages"); + StringWithSecondsNanos[] myGoodList = { + new StringWithSecondsNanos("aaa", 1664821424, 0), + new StringWithSecondsNanos("bbb", 1663821424, 0), + new StringWithSecondsNanos("ccc", 1664801424, 0) + }; + futureResponse1 = streamWriter.append(createProtoRowsMixed(myGoodList), -1); } - assertEquals(null, actualResponse); - - LOG.info("Resending with three good messages"); - StringWithSecondsNanos[] myGoodList = { - new StringWithSecondsNanos("aaa", 1664821424, 0), - new StringWithSecondsNanos("bbb", 1663821424, 0), - new StringWithSecondsNanos("ccc", 1664801424, 0) - }; - ApiFuture futureResponse1 = - streamWriter.append(CreateProtoRowsMixed(myGoodList), -1); assertEquals(0, futureResponse1.get().getAppendResult().getOffset().getValue()); TableResult result = bigquery.listTableData(tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L)); - Iterator iterDump = result.getValues().iterator(); - while (iterDump.hasNext()) { - FieldValueList currentRow = iterDump.next(); + for (FieldValueList currentRow : result.getValues()) { LOG.info("Table row contains " + currentRow.size() + " field values."); LOG.info("Table column has foo: " + currentRow.get(0).getStringValue()); LOG.info("Table column has bar: " + currentRow.get(1).getTimestampValue()); @@ -538,11 
+609,11 @@ public void testRowErrors() currentRow = iter.next(); assertEquals("ccc", currentRow.get(0).getStringValue()); assertEquals(1664801424000000L, currentRow.get(1).getTimestampValue()); - assertEquals(false, iter.hasNext()); + assertFalse(iter.hasNext()); } @Test - public void testRequestProfilerWithCommittedStream() + void testRequestProfilerWithCommittedStream() throws DescriptorValidationException, IOException, InterruptedException { String tableName = "TestProfiler"; TableId tableId = TableId.of(DATASET, tableName); @@ -553,7 +624,7 @@ public void testRequestProfilerWithCommittedStream() TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(parent.toString()) .setWriteStream( @@ -585,17 +656,17 @@ public void testRequestProfilerWithCommittedStream() LOG.info("Waiting for all responses to come back"); for (int i = 0; i < totalRequest; i++) { try { - Assert.assertEquals( + assertEquals( allResponses.get(i).get().getAppendResult().getOffset().getValue(), i * rowBatch); } catch (ExecutionException ex) { - Assert.fail("Unexpected error " + ex); + fail("Unexpected error " + ex); } } RequestProfiler.disableAndResetProfiler(); } @Test - public void testJsonStreamWriterWithDefaultSchema() + void testJsonStreamWriterWithDefaultSchema() throws IOException, InterruptedException, ExecutionException, @@ -633,7 +704,7 @@ public void testJsonStreamWriterWithDefaultSchema() // Create JsonStreamWriter with newBuilder(streamOrTable, client) try (JsonStreamWriter jsonStreamWriter = - JsonStreamWriter.newBuilder(parent.toString(), client) + JsonStreamWriter.newBuilder(parent.toString(), writeClient) .setIgnoreUnknownFields(true) .build()) { LOG.info("Sending one message"); @@ -696,9 +767,9 @@ public void testJsonStreamWriterWithDefaultSchema() ApiFuture response3 = jsonStreamWriter.append(jsonArr3, -1); 
LOG.info("Sending one more message"); ApiFuture response4 = jsonStreamWriter.append(jsonArr4, -1); - Assert.assertFalse(response2.get().getAppendResult().hasOffset()); - Assert.assertFalse(response3.get().getAppendResult().hasOffset()); - Assert.assertFalse(response4.get().getAppendResult().hasOffset()); + assertFalse(response2.get().getAppendResult().hasOffset()); + assertFalse(response3.get().getAppendResult().hasOffset()); + assertFalse(response4.get().getAppendResult().hasOffset()); TableResult result = bigquery.listTableData( @@ -716,28 +787,28 @@ public void testJsonStreamWriterWithDefaultSchema() FieldValueList currentRow2 = iter.next(); assertEquals("YQ==", currentRow2.get(3).getRepeatedValue().get(0).getStringValue()); assertEquals("Yg==", currentRow2.get(3).getRepeatedValue().get(1).getStringValue()); - assertEquals(false, iter.hasNext()); + assertFalse(iter.hasNext()); } } @Test - public void testJsonStreamWriterWithDefaultSchemaNoTable() { + void testJsonStreamWriterWithDefaultSchemaNoTable() { String tableName = "JsonStreamWriterWithDefaultSchemaNoTable"; TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); // Create JsonStreamWriter with newBuilder(streamOrTable, client) - try { - JsonStreamWriter jsonStreamWriter = - JsonStreamWriter.newBuilder(parent.toString(), client) - .setIgnoreUnknownFields(true) - .build(); + try (JsonStreamWriter ignore = + JsonStreamWriter.newBuilder(parent.toString(), writeClient) + .setIgnoreUnknownFields(true) + .build()) { + // Do nothing } catch (Exception exception) { assertTrue(exception.getMessage().contains("it may not exist")); } } @Test - public void testJsonStreamWriterWithDefaultStream() + void testJsonStreamWriterWithDefaultStream() throws IOException, InterruptedException, ExecutionException, @@ -870,9 +941,9 @@ public void testJsonStreamWriterWithDefaultStream() ApiFuture response3 = jsonStreamWriter.append(jsonArr3, -1); LOG.info("Sending one more message"); ApiFuture 
response4 = jsonStreamWriter.append(jsonArr4, -1); - Assert.assertFalse(response2.get().getAppendResult().hasOffset()); - Assert.assertFalse(response3.get().getAppendResult().hasOffset()); - Assert.assertFalse(response4.get().getAppendResult().hasOffset()); + assertFalse(response2.get().getAppendResult().hasOffset()); + assertFalse(response3.get().getAppendResult().hasOffset()); + assertFalse(response4.get().getAppendResult().hasOffset()); TableResult result = bigquery.listTableData( @@ -898,12 +969,12 @@ public void testJsonStreamWriterWithDefaultStream() FieldValueList currentRow2 = iter.next(); assertEquals("YQ==", currentRow2.get(3).getRepeatedValue().get(0).getStringValue()); assertEquals("Yg==", currentRow2.get(3).getRepeatedValue().get(1).getStringValue()); - assertEquals(false, iter.hasNext()); + assertFalse(iter.hasNext()); } } @Test - public void testJsonDefaultStreamOnTableWithDefaultValue_SchemaNotGiven() + void testJsonDefaultStreamOnTableWithDefaultValue_SchemaNotGiven() throws IOException, InterruptedException, ExecutionException, @@ -918,7 +989,7 @@ public void testJsonDefaultStreamOnTableWithDefaultValue_SchemaNotGiven() TableInfo.newBuilder(TableId.of(DATASET, tableName), defaultValueTableDefinition).build(); bigquery.create(tableInfo); try (JsonStreamWriter jsonStreamWriter = - JsonStreamWriter.newBuilder(defaultTableId, client) + JsonStreamWriter.newBuilder(defaultTableId, writeClient) .setDefaultMissingValueInterpretation(MissingValueInterpretation.DEFAULT_VALUE) .build()) { testJsonStreamWriterForDefaultValue(jsonStreamWriter); @@ -926,7 +997,7 @@ public void testJsonDefaultStreamOnTableWithDefaultValue_SchemaNotGiven() } @Test - public void testJsonExclusiveStreamOnTableWithDefaultValue_GiveTableSchema() + void testJsonExclusiveStreamOnTableWithDefaultValue_GiveTableSchema() throws IOException, InterruptedException, ExecutionException, @@ -941,7 +1012,7 @@ public void testJsonExclusiveStreamOnTableWithDefaultValue_GiveTableSchema() 
TableInfo.newBuilder(TableId.of(DATASET, tableName), defaultValueTableDefinition).build(); bigquery.create(tableInfo); WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(exclusiveTableId) .setWriteStream( @@ -1005,18 +1076,18 @@ private void testJsonStreamWriterForDefaultValue(JsonStreamWriter jsonStreamWrit currentRow = iter.next(); assertEquals("default_value_for_test", currentRow.get(0).getStringValue()); - assertEquals(null, currentRow.get(1).getValue()); + assertNull(currentRow.get(1).getValue()); assertFalse(currentRow.get(2).getStringValue().isEmpty()); // Check whether the recorded value is up to date enough. parsedInstant = Instant.ofEpochSecond(Double.valueOf(currentRow.get(2).getStringValue()).longValue()); assertTrue(parsedInstant.isAfter(Instant.now().minus(1, ChronoUnit.HOURS))); - assertEquals(false, iter.hasNext()); + assertFalse(iter.hasNext()); } @Test - public void testStreamWriterWithDefaultValue() throws ExecutionException, InterruptedException { + void testStreamWriterWithDefaultValue() throws ExecutionException, InterruptedException { String tableName = "streamWriterWithDefaultValue"; String exclusiveTableId = String.format( @@ -1071,7 +1142,7 @@ public void testStreamWriterWithDefaultValue() throws ExecutionException, Interr currentRow = iter.next(); assertEquals("default_value_for_test", currentRow.get(0).getStringValue()); - assertEquals(null, currentRow.get(1).getValue()); + assertNull(currentRow.get(1).getValue()); assertFalse(currentRow.get(2).getStringValue().isEmpty()); // Check whether the recorded value is up to date enough. 
Instant parsedInstant = @@ -1083,13 +1154,13 @@ public void testStreamWriterWithDefaultValue() throws ExecutionException, Interr } @Test - public void testArrowIngestionWithSerializedInput() + void testArrowIngestionWithSerializedInput() throws IOException, InterruptedException, ExecutionException, TimeoutException { testArrowIngestion(/* serializedInput= */ true); } @Test - public void testArrowIngestionWithUnSerializedInput() + void testArrowIngestionWithUnSerializedInput() throws IOException, InterruptedException, ExecutionException, TimeoutException { testArrowIngestion(/* serializedInput= */ false); } @@ -1178,7 +1249,7 @@ private void testArrowIngestion(boolean serializedInput) } if (serializedInput) { try (StreamWriter streamWriter = - StreamWriter.newBuilder(tableId + "/_default", client) + StreamWriter.newBuilder(tableId + "/_default", writeClient) .setWriterSchema(v1ArrowSchema) .setTraceId(TEST_TRACE_ID) .setMaxRetryDuration(java.time.Duration.ofSeconds(5)) @@ -1195,7 +1266,7 @@ private void testArrowIngestion(boolean serializedInput) } } else { try (StreamWriter streamWriter = - StreamWriter.newBuilder(tableId + "/_default", client) + StreamWriter.newBuilder(tableId + "/_default", writeClient) .setWriterSchema(arrowSchema) .setTraceId(TEST_TRACE_ID) .setMaxRetryDuration(java.time.Duration.ofSeconds(5)) @@ -1218,21 +1289,21 @@ private void testArrowIngestion(boolean serializedInput) FieldValueList currentRow = iter.next(); assertEquals("A", currentRow.get(0).getStringValue()); assertEquals("1", currentRow.get(1).getStringValue()); - assertEquals(true, currentRow.get(2).getBooleanValue()); + assertTrue(currentRow.get(2).getBooleanValue()); currentRow = iter.next(); assertEquals("B", currentRow.get(0).getStringValue()); assertEquals("2", currentRow.get(1).getStringValue()); - assertEquals(false, currentRow.get(2).getBooleanValue()); + assertFalse(currentRow.get(2).getBooleanValue()); currentRow = iter.next(); assertEquals("C", 
currentRow.get(0).getStringValue()); assertEquals("3", currentRow.get(1).getStringValue()); - assertEquals(true, currentRow.get(2).getBooleanValue()); - assertEquals(false, iter.hasNext()); + assertTrue(currentRow.get(2).getBooleanValue()); + assertFalse(iter.hasNext()); } // This test runs about 1 min. @Test - public void testJsonStreamWriterWithMessagesOver10M() + void testJsonStreamWriterWithMessagesOver10M() throws IOException, InterruptedException, ExecutionException, @@ -1246,7 +1317,7 @@ public void testJsonStreamWriterWithMessagesOver10M() TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(parent.toString()) .setWriteStream( @@ -1274,16 +1345,16 @@ public void testJsonStreamWriterWithMessagesOver10M() LOG.info("Waiting for all responses to come back"); for (int i = 0; i < totalRequest; i++) { try { - Assert.assertEquals( + assertEquals( allResponses.get(i).get().getAppendResult().getOffset().getValue(), i * rowBatch); } catch (ExecutionException ex) { - Assert.fail("Unexpected error " + ex); + fail("Unexpected error " + ex); } } } @Test - public void testJsonStreamWriterSchemaUpdate() + void testJsonStreamWriterSchemaUpdate() throws DescriptorValidationException, IOException, InterruptedException, ExecutionException { String tableName = "SchemaUpdateTestTable"; TableId tableId = TableId.of(DATASET, tableName); @@ -1294,14 +1365,14 @@ public void testJsonStreamWriterSchemaUpdate() bigquery.create(tableInfo); TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(parent.toString()) .setWriteStream( WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) .build()); try (JsonStreamWriter jsonStreamWriter = 
- JsonStreamWriter.newBuilder(writeStream.getName(), client).build()) { + JsonStreamWriter.newBuilder(writeStream.getName(), writeClient).build()) { // write the 1st row JSONObject foo = new JSONObject(); foo.put("col1", "aaa"); @@ -1367,7 +1438,7 @@ public void testJsonStreamWriterSchemaUpdate() } @Test - public void testJsonStreamWriterSchemaUpdateConcurrent() + void testJsonStreamWriterSchemaUpdateConcurrent() throws DescriptorValidationException, IOException, InterruptedException { // Create test table and test stream String tableName = "ConcurrentSchemaUpdateTestTable"; @@ -1379,7 +1450,7 @@ public void testJsonStreamWriterSchemaUpdateConcurrent() bigquery.create(tableInfo); TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(parent.toString()) .setWriteStream( @@ -1411,12 +1482,14 @@ public void testJsonStreamWriterSchemaUpdateConcurrent() // Start writing using the JsonWriter try (JsonStreamWriter jsonStreamWriter = - JsonStreamWriter.newBuilder(writeStream.getName(), client).build()) { + JsonStreamWriter.newBuilder(writeStream.getName(), writeClient).build()) { int numberOfThreads = 5; + CountDownLatch latch; + AtomicInteger next; ExecutorService streamTaskExecutor = Executors.newFixedThreadPool(5); - CountDownLatch latch = new CountDownLatch(numberOfThreads); + latch = new CountDownLatch(numberOfThreads); // Used to verify data correctness - AtomicInteger next = new AtomicInteger(); + next = new AtomicInteger(); // update TableSchema async Runnable updateTableSchemaTask = @@ -1475,6 +1548,7 @@ public void testJsonStreamWriterSchemaUpdateConcurrent() }); } latch.await(); + streamTaskExecutor.shutdown(); // verify that the last 5 rows streamed are ccc,ddd Iterator rowsIter = bigquery.listTableData(tableId).getValues().iterator(); @@ -1492,19 +1566,15 @@ public void 
testJsonStreamWriterSchemaUpdateConcurrent() } @Test - public void testJsonStreamWriterSchemaUpdateWithMissingValueInterpretationMap() - throws DescriptorValidationException, - ExecutionException, - IOException, - InterruptedException, - ParseException { + void testJsonStreamWriterSchemaUpdateWithMissingValueInterpretationMap() + throws DescriptorValidationException, ExecutionException, IOException, InterruptedException { String tableName = "SchemaUpdateMissingValueMapTestTable"; TableId tableId = TableId.of(DATASET, tableName); tableInfo = TableInfo.newBuilder(tableId, defaultValueTableDefinition).build(); bigquery.create(tableInfo); TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(parent.toString()) .setWriteStream( @@ -1517,7 +1587,7 @@ public void testJsonStreamWriterSchemaUpdateWithMissingValueInterpretationMap() "date_with_default_to_current", AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE); try (JsonStreamWriter jsonStreamWriter = - JsonStreamWriter.newBuilder(writeStream.getName(), client) + JsonStreamWriter.newBuilder(writeStream.getName(), writeClient) .setMissingValueInterpretationMap(missingValueMap) .build()) { // Verify the missing value map @@ -1618,7 +1688,7 @@ public void testJsonStreamWriterSchemaUpdateWithMissingValueInterpretationMap() } @Test - public void testJsonStreamWriterWithFlexibleColumnName() + void testJsonStreamWriterWithFlexibleColumnName() throws IOException, InterruptedException, ExecutionException, @@ -1643,7 +1713,7 @@ public void testJsonStreamWriterWithFlexibleColumnName() bigquery.create(tableInfo); TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(parent.toString()) 
.setWriteStream( @@ -1704,12 +1774,12 @@ public void testJsonStreamWriterWithFlexibleColumnName() assertEquals("bbb", iter.next().get(0).getStringValue()); assertEquals("ccc", iter.next().get(0).getStringValue()); assertEquals("ddd", iter.next().get(0).getStringValue()); - assertEquals(false, iter.hasNext()); + assertFalse(iter.hasNext()); } } @Test - public void testJsonStreamWriterWithNestedFlexibleColumnName() + void testJsonStreamWriterWithNestedFlexibleColumnName() throws IOException, InterruptedException, ExecutionException, @@ -1736,7 +1806,7 @@ public void testJsonStreamWriterWithNestedFlexibleColumnName() bigquery.create(tableInfo); TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(parent.toString()) .setWriteStream( @@ -1797,12 +1867,12 @@ public void testJsonStreamWriterWithNestedFlexibleColumnName() FieldValueList lastRecord = lastRow.get(1).getRepeatedValue().get(0).getRecordValue(); assertEquals("nested-str2", lastRecord.get(0).getStringValue()); assertEquals("20", lastRecord.get(1).getStringValue()); - assertEquals(false, iter.hasNext()); + assertFalse(iter.hasNext()); } } @Test - public void testJsonStreamWriterSchemaUpdateWithFlexibleColumnName() + void testJsonStreamWriterSchemaUpdateWithFlexibleColumnName() throws DescriptorValidationException, IOException, InterruptedException, ExecutionException { String tableName = "SchemaUpdateFlexColumnTestTable"; TableId tableId = TableId.of(DATASET, tableName); @@ -1813,14 +1883,14 @@ public void testJsonStreamWriterSchemaUpdateWithFlexibleColumnName() bigquery.create(tableInfo); TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(parent.toString()) 
.setWriteStream( WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) .build()); try (JsonStreamWriter jsonStreamWriter = - JsonStreamWriter.newBuilder(writeStream.getName(), client).build()) { + JsonStreamWriter.newBuilder(writeStream.getName(), writeClient).build()) { // write the 1st row JSONObject foo = new JSONObject(); foo.put("col1-列", "aaa"); @@ -1886,27 +1956,27 @@ public void testJsonStreamWriterSchemaUpdateWithFlexibleColumnName() } @Test - public void testComplicateSchemaWithPendingStream() + void testComplicateSchemaWithPendingStream() throws IOException, InterruptedException, ExecutionException { LOG.info("Create a write stream"); WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(tableId2) .setWriteStream(WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build()) .build()); - FinalizeWriteStreamResponse finalizeResponse = FinalizeWriteStreamResponse.getDefaultInstance(); + FinalizeWriteStreamResponse finalizeResponse; try (StreamWriter streamWriter = StreamWriter.newBuilder(writeStream.getName()) .setWriterSchema(ProtoSchemaConverter.convert(ComplicateType.getDescriptor())) .build()) { LOG.info("Sending two messages"); ApiFuture response = - streamWriter.append(CreateProtoRowsComplex(new String[] {"aaa"}), 0L); + streamWriter.append(createProtoRowsComplex(new String[] {"aaa"}), 0L); assertEquals(0, response.get().getAppendResult().getOffset().getValue()); ApiFuture response2 = - streamWriter.append(CreateProtoRowsComplex(new String[] {"bbb"}), 1L); + streamWriter.append(createProtoRowsComplex(new String[] {"bbb"}), 1L); assertEquals(1, response2.get().getAppendResult().getOffset().getValue()); // Nothing showed up since rows are not committed. 
@@ -1914,31 +1984,27 @@ public void testComplicateSchemaWithPendingStream() bigquery.listTableData( tableInfo2.getTableId(), BigQuery.TableDataListOption.startIndex(0L)); Iterator iter = result.getValues().iterator(); - assertEquals(false, iter.hasNext()); + assertFalse(iter.hasNext()); LOG.info("Finalize a write stream"); finalizeResponse = - client.finalizeWriteStream( + writeClient.finalizeWriteStream( FinalizeWriteStreamRequest.newBuilder().setName(writeStream.getName()).build()); ApiFuture response3 = - streamWriter.append(CreateProtoRows(new String[] {"ccc"}), 2L); - try { - response3.get(); - Assert.fail("Append to finalized stream should fail."); - } catch (Exception expected) { - LOG.info("Got exception: " + expected.toString()); - } + streamWriter.append(createProtoRows(new String[] {"ccc"}), 2L); + ExecutionException expected = assertThrows(ExecutionException.class, () -> response3.get()); + LOG.info("Got exception: " + expected.toString()); } assertEquals(2, finalizeResponse.getRowCount()); LOG.info("Commit a write stream"); BatchCommitWriteStreamsResponse batchCommitWriteStreamsResponse = - client.batchCommitWriteStreams( + writeClient.batchCommitWriteStreams( BatchCommitWriteStreamsRequest.newBuilder() .setParent(tableId2) .addWriteStreams(writeStream.getName()) .build()); - assertEquals(true, batchCommitWriteStreamsResponse.hasCommitTime()); + assertTrue(batchCommitWriteStreamsResponse.hasCommitTime()); TableResult queryResult = bigquery.query( QueryJobConfiguration.newBuilder("SELECT * from " + DATASET + '.' 
+ TABLE2).build()); @@ -1958,9 +2024,9 @@ public void testComplicateSchemaWithPendingStream() } @Test - public void testStreamError() throws IOException, InterruptedException, ExecutionException { + void testStreamError() throws IOException, InterruptedException, ExecutionException { WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(tableId) .setWriteStream( @@ -1971,30 +2037,26 @@ public void testStreamError() throws IOException, InterruptedException, Executio .setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor())) .build()) { ApiFuture response = - streamWriter.append(CreateProtoRows(new String[] {"aaa"}), -1L); + streamWriter.append(createProtoRows(new String[] {"aaa"}), -1L); assertEquals(0L, response.get().getAppendResult().getOffset().getValue()); // Send in a bogus stream name should cause in connection error. ApiFuture response2 = - streamWriter.append(CreateProtoRows(new String[] {"aaa"}), 100L); - try { - response2.get(); - Assert.fail("Should fail"); - } catch (ExecutionException e) { - assertThat(e.getCause().getMessage()) - .contains("OUT_OF_RANGE: The offset is beyond stream, expected offset 1, received 100"); - } + streamWriter.append(createProtoRows(new String[] {"aaa"}), 100L); + ExecutionException e = assertThrows(ExecutionException.class, () -> response2.get()); + assertThat(e.getCause().getMessage()) + .contains("OUT_OF_RANGE: The offset is beyond stream, expected offset 1, received 100"); // We can keep sending requests on the same stream. 
ApiFuture response3 = - streamWriter.append(CreateProtoRows(new String[] {"aaa"}), -1L); + streamWriter.append(createProtoRows(new String[] {"aaa"}), -1L); assertEquals(1L, response3.get().getAppendResult().getOffset().getValue()); } finally { } } @Test - public void testStreamSchemaMisMatchError() throws IOException, InterruptedException { + void testStreamSchemaMisMatchError() throws IOException, InterruptedException { WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(tableId) .setWriteStream( @@ -2008,25 +2070,20 @@ public void testStreamSchemaMisMatchError() throws IOException, InterruptedExcep // Create a proto row that has extra fields than the table schema defined which should trigger // the SCHEMA_MISMATCH_EXTRA_FIELDS error ApiFuture response = - streamWriter.append(CreateProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0); - try { - response.get(); - Assert.fail("Should fail"); - } catch (ExecutionException e) { - assertEquals(Exceptions.SchemaMismatchedException.class, e.getCause().getClass()); - Exceptions.SchemaMismatchedException actualError = (SchemaMismatchedException) e.getCause(); - assertNotNull(actualError.getStreamName()); - // This verifies that the Beam connector can consume this custom exception's grpc StatusCode - assertEquals(Code.INVALID_ARGUMENT, Status.fromThrowable(e.getCause()).getCode()); - } + streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0); + ExecutionException e = assertThrows(ExecutionException.class, () -> response.get()); + assertEquals(Exceptions.SchemaMismatchedException.class, e.getCause().getClass()); + Exceptions.SchemaMismatchedException actualError = (SchemaMismatchedException) e.getCause(); + assertNotNull(actualError.getStreamName()); + // This verifies that the Beam connector can consume this custom exception's grpc StatusCode + assertEquals(Code.INVALID_ARGUMENT, 
Status.fromThrowable(e.getCause()).getCode()); } } @Test - public void testStreamFinalizedError() - throws IOException, InterruptedException, ExecutionException { + void testStreamFinalizedError() throws IOException, InterruptedException, ExecutionException { WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(tableId) .setWriteStream( @@ -2038,33 +2095,28 @@ public void testStreamFinalizedError() .build()) { // Append once before finalizing the stream ApiFuture response = - streamWriter.append(CreateProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0); + streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0); response.get(); // Finalize the stream in order to trigger STREAM_FINALIZED error - client.finalizeWriteStream( + writeClient.finalizeWriteStream( FinalizeWriteStreamRequest.newBuilder().setName(writeStream.getName()).build()); // Try to append to a finalized stream ApiFuture response2 = - streamWriter.append(CreateProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 1); - try { - response2.get(); - Assert.fail("Should fail"); - } catch (ExecutionException e) { - assertEquals(Exceptions.StreamFinalizedException.class, e.getCause().getClass()); - Exceptions.StreamFinalizedException actualError = (StreamFinalizedException) e.getCause(); - assertNotNull(actualError.getStreamName()); - // This verifies that the Beam connector can consume this custom exception's grpc StatusCode - assertEquals(Code.INVALID_ARGUMENT, Status.fromThrowable(e.getCause()).getCode()); - assertThat(e.getCause().getMessage()).contains("Stream has been finalized"); - } + streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 1); + ExecutionException e = assertThrows(ExecutionException.class, () -> response2.get()); + assertEquals(Exceptions.StreamFinalizedException.class, e.getCause().getClass()); + 
Exceptions.StreamFinalizedException actualError = (StreamFinalizedException) e.getCause(); + assertNotNull(actualError.getStreamName()); + // This verifies that the Beam connector can consume this custom exception's grpc StatusCode + assertEquals(Code.INVALID_ARGUMENT, Status.fromThrowable(e.getCause()).getCode()); + assertThat(e.getCause().getMessage()).contains("Stream has been finalized"); } } @Test - public void testOffsetAlreadyExistsError() - throws IOException, ExecutionException, InterruptedException { + void testOffsetAlreadyExistsError() throws IOException, ExecutionException, InterruptedException { WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(tableId) .setWriteStream( @@ -2076,31 +2128,27 @@ public void testOffsetAlreadyExistsError() .build()) { // Append once with correct offset ApiFuture response = - streamWriter.append(CreateProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0); + streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0); response.get(); // Append again with the same offset ApiFuture response2 = - streamWriter.append(CreateProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0); - try { - response2.get(); - Assert.fail("Should fail"); - } catch (ExecutionException e) { - assertEquals(Exceptions.OffsetAlreadyExists.class, e.getCause().getClass()); - Exceptions.OffsetAlreadyExists actualError = (OffsetAlreadyExists) e.getCause(); - assertNotNull(actualError.getStreamName()); - assertEquals(1, actualError.getExpectedOffset()); - assertEquals(0, actualError.getActualOffset()); - assertEquals(Code.ALREADY_EXISTS, Status.fromThrowable(e.getCause()).getCode()); - assertThat(e.getCause().getMessage()) - .contains("The offset is within stream, expected offset 1, received 0"); - } + streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0); + ExecutionException e = 
assertThrows(ExecutionException.class, () -> response2.get()); + assertEquals(Exceptions.OffsetAlreadyExists.class, e.getCause().getClass()); + Exceptions.OffsetAlreadyExists actualError = (OffsetAlreadyExists) e.getCause(); + assertNotNull(actualError.getStreamName()); + assertEquals(1, actualError.getExpectedOffset()); + assertEquals(0, actualError.getActualOffset()); + assertEquals(Code.ALREADY_EXISTS, Status.fromThrowable(e.getCause()).getCode()); + assertThat(e.getCause().getMessage()) + .contains("The offset is within stream, expected offset 1, received 0"); } } @Test - public void testOffsetOutOfRangeError() throws IOException, InterruptedException { + void testOffsetOutOfRangeError() throws IOException, InterruptedException { WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(tableId) .setWriteStream( @@ -2112,27 +2160,23 @@ public void testOffsetOutOfRangeError() throws IOException, InterruptedException .build()) { // Append with an out of range offset ApiFuture response = - streamWriter.append(CreateProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 10); - try { - response.get(); - Assert.fail("Should fail"); - } catch (ExecutionException e) { - assertEquals(Exceptions.OffsetOutOfRange.class, e.getCause().getClass()); - Exceptions.OffsetOutOfRange actualError = (OffsetOutOfRange) e.getCause(); - assertNotNull(actualError.getStreamName()); - assertEquals(0, actualError.getExpectedOffset()); - assertEquals(10, actualError.getActualOffset()); - assertEquals(Code.OUT_OF_RANGE, Status.fromThrowable(e.getCause()).getCode()); - assertThat(e.getCause().getMessage()) - .contains("The offset is beyond stream, expected offset 0, received 10"); - } + streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 10); + ExecutionException e = assertThrows(ExecutionException.class, () -> response.get()); + assertEquals(Exceptions.OffsetOutOfRange.class, 
e.getCause().getClass()); + Exceptions.OffsetOutOfRange actualError = (OffsetOutOfRange) e.getCause(); + assertNotNull(actualError.getStreamName()); + assertEquals(0, actualError.getExpectedOffset()); + assertEquals(10, actualError.getActualOffset()); + assertEquals(Code.OUT_OF_RANGE, Status.fromThrowable(e.getCause()).getCode()); + assertThat(e.getCause().getMessage()) + .contains("The offset is beyond stream, expected offset 0, received 10"); } } @Test - public void testStreamReconnect() throws IOException, InterruptedException, ExecutionException { + void testStreamReconnect() throws IOException, InterruptedException, ExecutionException { WriteStream writeStream = - client.createWriteStream( + writeClient.createWriteStream( CreateWriteStreamRequest.newBuilder() .setParent(tableId) .setWriteStream( @@ -2143,7 +2187,7 @@ public void testStreamReconnect() throws IOException, InterruptedException, Exec .setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor())) .build()) { ApiFuture response = - streamWriter.append(CreateProtoRows(new String[] {"aaa"}), 0L); + streamWriter.append(createProtoRows(new String[] {"aaa"}), 0L); assertEquals(0L, response.get().getAppendResult().getOffset().getValue()); } @@ -2154,13 +2198,13 @@ public void testStreamReconnect() throws IOException, InterruptedException, Exec // Currently there is a bug that reconnection must wait 5 seconds to get the real row count. 
Thread.sleep(5000L); ApiFuture response = - streamWriter.append(CreateProtoRows(new String[] {"bbb"}), 1L); + streamWriter.append(createProtoRows(new String[] {"bbb"}), 1L); assertEquals(1L, response.get().getAppendResult().getOffset().getValue()); } } @Test - public void testMultiplexingMixedLocation() + void testMultiplexingMixedLocation() throws IOException, InterruptedException, ExecutionException { ConnectionWorkerPool.setOptions( ConnectionWorkerPool.Settings.builder() @@ -2199,11 +2243,11 @@ public void testMultiplexingMixedLocation() .setTraceId(TEST_TRACE_ID) .build(); ApiFuture response1 = - streamWriter1.append(CreateProtoRows(new String[] {"aaa"})); + streamWriter1.append(createProtoRows(new String[] {"aaa"})); ApiFuture response2 = - streamWriter2.append(CreateProtoRowsComplex(new String[] {"aaa"})); + streamWriter2.append(createProtoRowsComplex(new String[] {"aaa"})); ApiFuture response3 = - streamWriter3.append(CreateProtoRows(new String[] {"bbb"})); + streamWriter3.append(createProtoRows(new String[] {"bbb"})); assertEquals(0L, response1.get().getAppendResult().getOffset().getValue()); assertEquals(0L, response2.get().getAppendResult().getOffset().getValue()); assertEquals(0L, response3.get().getAppendResult().getOffset().getValue()); @@ -2216,7 +2260,7 @@ public void testMultiplexingMixedLocation() } @Test - public void testLargeRequest() throws IOException, InterruptedException, ExecutionException { + void testLargeRequest() throws IOException, InterruptedException, ExecutionException { String tableName = "largeRequestTable"; TableId tableId = TableId.of(DATASET, tableName); Field col1 = Field.newBuilder("col1", StandardSQLTypeName.STRING).build(); @@ -2227,7 +2271,7 @@ public void testLargeRequest() throws IOException, InterruptedException, Executi TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); try (StreamWriter streamWriter = StreamWriter.newBuilder(parent.toString() + "/_default") - 
.setWriterSchema(CreateProtoSchemaWithColField()) + .setWriterSchema(createProtoSchemaWithColField()) .build()) { List sizeSet = Arrays.asList(15 * 1024 * 1024, 1024); List> responseList = @@ -2238,7 +2282,7 @@ public void testLargeRequest() throws IOException, InterruptedException, Executi LOG.info("Sending size: " + size); responseList.add( streamWriter.append( - CreateProtoRows( + createProtoRows( new String[] { new String(new char[size]).replace('\u0000', (char) (r.nextInt(26) + 'a')) }))); @@ -2255,4 +2299,289 @@ public void testLargeRequest() throws IOException, InterruptedException, Executi assertEquals("50", queryIter.next().get(0).getStringValue()); } } + + // Tests that inputs for micro and picos are able to use Arrow to write + // to BQ + @Test + void timestamp_arrowWrite() throws IOException { + String tableName = "bqstorage_timestamp_write_arrow"; + // Opt to create a new table to write to instead of re-using table to prevent + // the test from failing due to any issues with deleting data after test. 
+ // Increases the test time duration, but would be more resilient to transient + // failures + createTimestampTable(tableName); + + // Define the fields as Arrow types that are compatible with BQ Schema types + List fields = + ImmutableList.of( + new org.apache.arrow.vector.types.pojo.Field( + TIMESTAMP_COLUMN_NAME, + FieldType.nullable( + new ArrowType.Timestamp( + org.apache.arrow.vector.types.TimeUnit.MICROSECOND, "UTC")), + null), + new org.apache.arrow.vector.types.pojo.Field( + TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME, + FieldType.nullable( + new ArrowType.Timestamp( + org.apache.arrow.vector.types.TimeUnit.NANOSECOND, "UTC")), + null)); + org.apache.arrow.vector.types.pojo.Schema arrowSchema = + new org.apache.arrow.vector.types.pojo.Schema(fields, null); + + int numRows = INPUT_ARROW_WRITE_TIMESTAMPS.length; + TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); + try (StreamWriter streamWriter = + StreamWriter.newBuilder(parent.toString() + "/_default") + .setWriterSchema(arrowSchema) + .build()) { + try (VectorSchemaRoot root = VectorSchemaRoot.create(arrowSchema, allocator)) { + TimeStampMicroTZVector timestampVector = + (TimeStampMicroTZVector) root.getVector(TIMESTAMP_COLUMN_NAME); + TimeStampNanoTZVector timestampHigherPrecisionVector = + (TimeStampNanoTZVector) root.getVector(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME); + timestampVector.allocateNew(numRows); + timestampHigherPrecisionVector.allocateNew(numRows); + + for (int i = 0; i < numRows; i++) { + timestampVector.set(i, (Long) INPUT_ARROW_WRITE_TIMESTAMPS[i][0]); + timestampHigherPrecisionVector.set(i, (Long) INPUT_ARROW_WRITE_TIMESTAMPS[i][1]); + } + root.setRowCount(numRows); + + CompressionCodec codec = + NoCompressionCodec.Factory.INSTANCE.createCodec( + CompressionUtil.CodecType.NO_COMPRESSION); + VectorUnloader vectorUnloader = + new VectorUnloader(root, /* includeNullCount= */ true, codec, /* alignBuffers= */ true); + 
org.apache.arrow.vector.ipc.message.ArrowRecordBatch batch = + vectorUnloader.getRecordBatch(); + // Asynchronous append. + ApiFuture future = streamWriter.append(batch); + ApiFutures.addCallback( + future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor()); + } + } + assertTimestamps(tableName, EXPECTED_ARROW_WRITE_TIMESTAMPS_ISO_OUTPUT); + } + + // Tests that inputs for micro and picos are able to converted to protobuf + // and written to BQ + @Test + void timestamp_protobufWrite() + throws IOException, DescriptorValidationException, InterruptedException { + String tableName = "bqstorage_timestamp_write_protobuf_schema_aware"; + // Opt to create a new table to write to instead of re-using table to prevent + // the test from failing due to any issues with deleting data after test. + // Increases the test time duration, but would be more resilient to transient + // failures + createTimestampTable(tableName); + + // Define the table schema so that the automatic converter is able to + // determine how to convert from Json -> Protobuf + TableFieldSchema testTimestamp = + TableFieldSchema.newBuilder() + .setName(TIMESTAMP_COLUMN_NAME) + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + TableFieldSchema testTimestampHighPrecision = + TableFieldSchema.newBuilder() + .setName(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME) + .setTimestampPrecision( + Int64Value.newBuilder().setValue(Helper.PICOSECOND_PRECISION).build()) + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields(testTimestamp) + .addFields(testTimestampHighPrecision) + .build(); + + TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); + try (JsonStreamWriter jsonStreamWriter = + JsonStreamWriter.newBuilder(parent.toString(), tableSchema).build()) { + + // Creates a single payload to append (JsonArray with 
multiple JsonObjects) + // Each JsonObject contains a row (one micros, one picos) + JSONArray jsonArray = new JSONArray(); + for (Object[] timestampData : Helper.INPUT_TIMESTAMPS) { + JSONObject row = new JSONObject(); + row.put(TIMESTAMP_COLUMN_NAME, timestampData[0]); + row.put(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME, timestampData[1]); + jsonArray.put(row); + } + ApiFuture future = jsonStreamWriter.append(jsonArray); + ApiFutures.addCallback( + future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor()); + } + assertTimestamps(tableName, EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT); + } + + // Tests that users can use a Protobuf message that contains second a fractional + // part (pico) to be written to BQ + @Test + void timestamp_protobufWrite_customMessage_higherPrecision() + throws IOException, DescriptorValidationException { + String tableName = "bqstorage_timestamp_write_protobuf_custom_descriptor"; + // Opt to create a new table to write to instead of re-using table to prevent + // the test from failing due to any issues with deleting data after test. + // Increases the test time duration, but would be more resilient to transient + // failures + createTimestampTable(tableName); + + /* + A sample protobuf format: + message Wrapper { + message TimestampPicos { + int64 seconds = 1; + int64 picoseconds = 2; + } + Wrapper timestampHigherPrecision = 1; + // ... 
+ } + */ + String wrapperProtoName = "Wrapper"; + String timestampPicosProtoName = "TimestampPicos"; + String secondsProtoName = "seconds"; + String picosProtoName = "picoseconds"; + DescriptorProto timestampPicosDescriptor = + DescriptorProto.newBuilder() + .setName(timestampPicosProtoName) + .addField( + DescriptorProtos.FieldDescriptorProto.newBuilder() + .setName(secondsProtoName) + .setNumber(1) + .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64) + .build()) + .addField( + DescriptorProtos.FieldDescriptorProto.newBuilder() + .setName(picosProtoName) + .setNumber(2) + .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64) + .build()) + .build(); + DescriptorProto wrapperDescriptor = + DescriptorProto.newBuilder() + .setName(wrapperProtoName) // random name + .addField( + DescriptorProtos.FieldDescriptorProto.newBuilder() + .setName(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME) + .setNumber(3) + .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE) + .setTypeName(timestampPicosDescriptor.getName()) + .build()) + .addNestedType(timestampPicosDescriptor) + .build(); + ProtoSchema protoSchema = + ProtoSchema.newBuilder().setProtoDescriptor(wrapperDescriptor).build(); + + TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); + try (StreamWriter streamWriter = + StreamWriter.newBuilder(parent.toString() + "/_default", writeClient) + .setWriterSchema(protoSchema) + .build()) { + DescriptorProtos.FileDescriptorProto fileProto = + DescriptorProtos.FileDescriptorProto.newBuilder() + .setName("test.proto") // dummy proto file + .addMessageType(wrapperDescriptor) + .build(); + + // Build the runtime descriptor (resolves types and names) + Descriptors.FileDescriptor file = + Descriptors.FileDescriptor.buildFrom(fileProto, new Descriptors.FileDescriptor[] {}); + + // Get the handle to the "wrapper" message type + Descriptors.Descriptor descriptor = file.findMessageTypeByName(wrapperProtoName); + + 
ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); + for (Long[] timestampParts : INPUT_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS) { + Message message = + DynamicMessage.newBuilder(descriptor) + .setField( + descriptor.findFieldByName(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME), + DynamicMessage.newBuilder( + descriptor.findNestedTypeByName(timestampPicosProtoName)) + .setField( + descriptor + .findNestedTypeByName(timestampPicosProtoName) + .findFieldByName(secondsProtoName), + timestampParts[0]) + .setField( + descriptor + .findNestedTypeByName(timestampPicosProtoName) + .findFieldByName(picosProtoName), + timestampParts[1]) + .build()) + .build(); + rowsBuilder.addSerializedRows(message.toByteString()); + } + ApiFuture future = streamWriter.append(rowsBuilder.build()); + ApiFutures.addCallback( + future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor()); + } + String table = + BigQueryResource.formatTableResource( + ServiceOptions.getDefaultProjectId(), DATASET, tableName); + + // Read all the data as Avro GenericRecords + List rows = Helper.readAllRows(readClient, parentProjectId, table, null); + List timestampHigherPrecision = + rows.stream() + .map(x -> x.get(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME).toString()) + .collect(Collectors.toList()); + assertEquals( + EXPECTED_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS_HIGH_PRECISION_ISO_OUTPUT.length, + timestampHigherPrecision.size()); + for (int i = 0; + i < EXPECTED_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS_HIGH_PRECISION_ISO_OUTPUT.length; + i++) { + assertEquals( + EXPECTED_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS_HIGH_PRECISION_ISO_OUTPUT[i], + timestampHigherPrecision.get(i)); + } + } + + private void createTimestampTable(String tableName) { + Schema bqTableSchema = + Schema.of( + Field.newBuilder(TIMESTAMP_COLUMN_NAME, StandardSQLTypeName.TIMESTAMP) + .setMode(Mode.NULLABLE) + .build(), + Field.newBuilder(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME, StandardSQLTypeName.TIMESTAMP) + .setMode(Mode.NULLABLE) + 
.setTimestampPrecision(Helper.PICOSECOND_PRECISION) + .build()); + + TableId testTableId = TableId.of(DATASET, tableName); + bigquery.create( + TableInfo.of( + testTableId, StandardTableDefinition.newBuilder().setSchema(bqTableSchema).build())); + } + + private void assertTimestamps(String tableName, Object[][] expected) throws IOException { + String table = + BigQueryResource.formatTableResource( + ServiceOptions.getDefaultProjectId(), DATASET, tableName); + + // Read all the data as Avro GenericRecords + List rows = Helper.readAllRows(readClient, parentProjectId, table, null); + + // Each timestamp response is expected to contain two fields: + // 1. Micros from timestamp as a Long and 2. ISO8601 instant with picos precision + List timestamps = + rows.stream().map(x -> (Long) x.get(TIMESTAMP_COLUMN_NAME)).collect(Collectors.toList()); + List timestampHigherPrecision = + rows.stream() + .map(x -> x.get(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME).toString()) + .collect(Collectors.toList()); + + assertEquals(expected.length, timestamps.size()); + assertEquals(expected.length, timestampHigherPrecision.size()); + for (int i = 0; i < timestampHigherPrecision.size(); i++) { + assertEquals(expected[i][0], timestamps.get(i)); + assertEquals(expected[i][1], timestampHigherPrecision.get(i)); + } + } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryTimeEncoderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryTimeEncoderTest.java index a653143ed8..b3c84ec403 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryTimeEncoderTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryTimeEncoderTest.java @@ -16,7 +16,8 @@ package com.google.cloud.bigquery.storage.v1.it; -import static org.junit.Assert.assertEquals; +import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import com.google.api.core.ApiFuture; import com.google.cloud.ServiceOptions; @@ -44,14 +45,14 @@ import java.time.LocalTime; import java.util.Iterator; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import org.json.JSONArray; import org.json.JSONObject; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; -public class ITBigQueryTimeEncoderTest { +class ITBigQueryTimeEncoderTest { private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); private static final String TABLE = "testtable"; private static final String DESCRIPTION = "BigQuery Write Java manual client test dataset"; @@ -60,8 +61,8 @@ public class ITBigQueryTimeEncoderTest { private static TableInfo tableInfo; private static BigQuery bigquery; - @BeforeClass - public static void beforeClass() throws IOException { + @BeforeAll + static void beforeAll() throws IOException { client = BigQueryWriteClient.create(); RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); @@ -97,10 +98,11 @@ public static void beforeClass() throws IOException { bigquery.create(tableInfo); } - @AfterClass - public static void afterClass() { + @AfterAll + static void afterAll() throws InterruptedException { if (client != null) { client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); } if (bigquery != null) { RemoteBigQueryHelper.forceDelete(bigquery, DATASET); @@ -108,7 +110,7 @@ public static void afterClass() { } @Test - public void TestTimeEncoding() + void TestTimeEncoding() throws IOException, InterruptedException, ExecutionException, @@ -187,7 +189,7 @@ public void TestTimeEncoding() row.put("test_date", 300); JSONArray jsonArr = new JSONArray(new JSONObject[] 
{row}); ApiFuture response = jsonStreamWriter.append(jsonArr, -1); - Assert.assertFalse(response.get().getAppendResult().hasOffset()); + assertFalse(response.get().getAppendResult().hasOffset()); TableResult result = bigquery.listTableData( tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L)); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteNonQuotaRetryTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteNonQuotaRetryTest.java index 90adb81b1e..2d9378341d 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteNonQuotaRetryTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteNonQuotaRetryTest.java @@ -17,7 +17,8 @@ package com.google.cloud.bigquery.storage.v1.it; import static com.google.common.truth.Truth.assertThat; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import com.google.api.core.ApiFuture; import com.google.cloud.bigquery.BigQuery; @@ -38,6 +39,7 @@ import com.google.cloud.bigquery.storage.v1.StreamWriter; import com.google.cloud.bigquery.storage.v1.TableName; import com.google.cloud.bigquery.storage.v1.WriteStream; +import com.google.cloud.bigquery.storage.v1.it.util.WriteRetryTestUtil; import com.google.cloud.bigquery.testing.RemoteBigQueryHelper; import com.google.protobuf.DescriptorProtos.DescriptorProto; import com.google.protobuf.DescriptorProtos.FieldDescriptorProto; @@ -45,14 +47,14 @@ import io.grpc.Status.Code; import java.io.IOException; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.logging.Logger; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import 
org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** Integration tests for BigQuery Write API. */ -public class ITBigQueryWriteNonQuotaRetryTest { +class ITBigQueryWriteNonQuotaRetryTest { private static final Logger LOG = Logger.getLogger(ITBigQueryWriteQuotaRetryTest.class.getName()); private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); private static final String TABLE = "testtable"; @@ -63,8 +65,8 @@ public class ITBigQueryWriteNonQuotaRetryTest { private static BigQueryWriteClient client; private static BigQuery bigquery; - @BeforeClass - public static void beforeClass() throws IOException { + @BeforeAll + static void beforeAll() throws IOException { client = BigQueryWriteClient.create(); RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); @@ -85,10 +87,11 @@ public static void beforeClass() throws IOException { bigquery.create(tableInfo); } - @AfterClass - public static void afterClass() { + @AfterAll + static void afterAll() throws InterruptedException { if (client != null) { client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); } if (bigquery != null) { @@ -107,7 +110,7 @@ ProtoRows CreateProtoRows(String[] messages) { } @Test - public void testJsonStreamWriterCommittedStreamWithNonQuotaRetry() + void testJsonStreamWriterCommittedStreamWithNonQuotaRetry() throws IOException, InterruptedException, DescriptorValidationException { WriteRetryTestUtil.runExclusiveRetryTest( bigquery, @@ -120,7 +123,7 @@ public void testJsonStreamWriterCommittedStreamWithNonQuotaRetry() } @Test - public void testJsonStreamWriterDefaultStreamWithNonQuotaRetry() + void testJsonStreamWriterDefaultStreamWithNonQuotaRetry() throws IOException, InterruptedException, DescriptorValidationException { WriteRetryTestUtil.runDefaultRetryTest( bigquery, @@ -131,11 +134,10 @@ public void testJsonStreamWriterDefaultStreamWithNonQuotaRetry() /* rowBatchSize= */ 1); } - // Moved to 
ITBigQueryWriteNonQuotaRetryTest from ITBigQueryWriteManualClientTest, as it requires + // Moved to ITBigQueryWriteNonQuotaRetryTest from ITBigQueryWriteClientTest, as it requires // usage of the project this file uses to inject errors (bq-write-api-java-retry-test). @Test - public void testDefaultRequestLimit() - throws IOException, InterruptedException, ExecutionException { + void testDefaultRequestLimit() throws IOException, InterruptedException, ExecutionException { DatasetId datasetId = DatasetId.of(NON_QUOTA_RETRY_PROJECT_ID, RemoteBigQueryHelper.generateDatasetName()); DatasetInfo datasetInfo = DatasetInfo.newBuilder(datasetId).build(); @@ -170,44 +172,28 @@ public void testDefaultRequestLimit() streamWriter.append( CreateProtoRows( new String[] {new String(new char[19 * 1024 * 1024]).replace("\0", "a")})); - try { - AppendRowsResponse resp = response.get(); - LOG.info( - "Message succeded. Dataset info: " - + datasetInfo.toString() - + " tableinfo: " - + tableInfo.toString() - + " parent: " - + parent - + "streamWriter: " - + streamWriter.toString() - + "response: " - + resp); - Assert.fail("Large request should fail with InvalidArgumentError"); - } catch (ExecutionException ex) { - LOG.info( - "Message failed. Dataset info: " - + datasetInfo.toString() - + " tableinfo: " - + tableInfo.toString() - + " parent: " - + parent - + "streamWriter: " - + streamWriter); - assertEquals(io.grpc.StatusRuntimeException.class, ex.getCause().getClass()); - io.grpc.StatusRuntimeException actualError = - (io.grpc.StatusRuntimeException) ex.getCause(); - // This verifies that the Beam connector can consume this custom exception's grpc - // StatusCode - // TODO(yiru): temp fix to unblock test, while final fix is being rolled out. 
- if (actualError.getStatus().getCode() != Code.INTERNAL) { - assertEquals(Code.INVALID_ARGUMENT, actualError.getStatus().getCode()); - assertThat( - actualError - .getStatus() - .getDescription() - .contains("AppendRows request too large: 19923131 limit 10485760")); - } + ExecutionException ex = assertThrows(ExecutionException.class, () -> response.get()); + LOG.info( + "Message failed. Dataset info: " + + datasetInfo.toString() + + " tableinfo: " + + tableInfo.toString() + + " parent: " + + parent + + "streamWriter: " + + streamWriter); + assertEquals(io.grpc.StatusRuntimeException.class, ex.getCause().getClass()); + io.grpc.StatusRuntimeException actualError = (io.grpc.StatusRuntimeException) ex.getCause(); + // This verifies that the Beam connector can consume this custom exception's grpc + // StatusCode + // TODO(yiru): temp fix to unblock test, while final fix is being rolled out. + if (actualError.getStatus().getCode() != Code.INTERNAL) { + assertEquals(Code.INVALID_ARGUMENT, actualError.getStatus().getCode()); + assertThat( + actualError + .getStatus() + .getDescription() + .contains("AppendRows request too large: 19923131 limit 10485760")); } } } finally { diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteQuotaRetryTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteQuotaRetryTest.java index 86207508fa..b4069a7aee 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteQuotaRetryTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteQuotaRetryTest.java @@ -26,13 +26,15 @@ import com.google.cloud.bigquery.TableInfo; import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; import com.google.cloud.bigquery.storage.v1.WriteStream; +import com.google.cloud.bigquery.storage.v1.it.util.WriteRetryTestUtil; import 
com.google.cloud.bigquery.testing.RemoteBigQueryHelper; import com.google.protobuf.Descriptors.DescriptorValidationException; import java.io.IOException; +import java.util.concurrent.TimeUnit; import java.util.logging.Logger; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** Integration tests for BigQuery Write API. */ public class ITBigQueryWriteQuotaRetryTest { @@ -46,8 +48,8 @@ public class ITBigQueryWriteQuotaRetryTest { private static BigQueryWriteClient client; private static BigQuery bigquery; - @BeforeClass - public static void beforeClass() throws IOException { + @BeforeAll + static void beforeAll() throws IOException { client = BigQueryWriteClient.create(); RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); @@ -68,10 +70,11 @@ public static void beforeClass() throws IOException { bigquery.create(tableInfo); } - @AfterClass - public static void afterClass() { + @AfterAll + static void afterAll() throws InterruptedException { if (client != null) { client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); } if (bigquery != null) { @@ -81,7 +84,7 @@ public static void afterClass() { } @Test - public void testJsonStreamWriterCommittedStreamWithQuotaRetry() + void testJsonStreamWriterCommittedStreamWithQuotaRetry() throws IOException, InterruptedException, DescriptorValidationException { WriteRetryTestUtil.runExclusiveRetryTest( bigquery, @@ -94,7 +97,7 @@ public void testJsonStreamWriterCommittedStreamWithQuotaRetry() } @Test - public void testJsonStreamWriterDefaultStreamWithQuotaRetry() + void testJsonStreamWriterDefaultStreamWithQuotaRetry() throws IOException, InterruptedException, DescriptorValidationException { WriteRetryTestUtil.runDefaultRetryTest( bigquery, diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/BigQueryResource.java 
b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/BigQueryResource.java similarity index 90% rename from google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/BigQueryResource.java rename to google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/BigQueryResource.java index b42ff26e63..04daffb348 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/BigQueryResource.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/BigQueryResource.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package com.google.cloud.bigquery.storage.v1.it; +package com.google.cloud.bigquery.storage.v1.it.util; /** Test helper class to generate BigQuery resource paths. */ public class BigQueryResource { @@ -28,7 +28,7 @@ public class BigQueryResource { * @param tableId * @return a path to a table resource. */ - public static String FormatTableResource(String projectId, String datasetId, String tableId) { + public static String formatTableResource(String projectId, String datasetId, String tableId) { return String.format("projects/%s/datasets/%s/tables/%s", projectId, datasetId, tableId); } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/Helper.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/Helper.java new file mode 100644 index 0000000000..1e1b0e2fb0 --- /dev/null +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/Helper.java @@ -0,0 +1,204 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigquery.storage.v1.it.util; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import com.google.api.core.ApiFutureCallback; +import com.google.api.gax.rpc.ServerStream; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.AvroSerializationOptions; +import com.google.cloud.bigquery.storage.v1.BigQueryReadClient; +import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1.DataFormat; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1.ReadSession; +import com.google.common.base.Preconditions; +import com.google.protobuf.util.Timestamps; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericRecordBuilder; + +public class Helper { + + public static final long PICOSECOND_PRECISION = 12; + public static final String TIMESTAMP_COLUMN_NAME = "timestamp"; + public static final String TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME = "timestampHigherPrecision"; + + // Sample test cases for timestamps. 
First element is micros from epoch and the second element + // is the ISO format with picosecond precision + public static final Object[][] INPUT_TIMESTAMPS = + new Object[][] { + {1735734896123456L /* 2025-01-01T12:34:56.123456Z */, "2025-01-01T12:34:56.123456789123Z"}, + {1580646896123456L /* 2020-02-02T12:34:56.123456Z */, "2020-02-02T12:34:56.123456789123Z"}, + {636467696123456L /* 1990-03-03T12:34:56.123456Z */, "1990-03-03T12:34:56.123456789123Z"}, + {165846896123456L /* 1975-04-04T12:34:56.123456Z */, "1975-04-04T12:34:56.123456789123Z"} + }; + + // Expected response for timestamps from the input. If enabled with ISO as output, it will + // use ISO8601 format for any picosecond enabled column. + public static final Object[][] EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT = + new Object[][] { + {1735734896123456L /* 2025-01-01T12:34:56.123456Z */, "2025-01-01T12:34:56.123456789123Z"}, + {1580646896123456L /* 2020-02-02T12:34:56.123456Z */, "2020-02-02T12:34:56.123456789123Z"}, + {636467696123456L /* 1990-03-03T12:34:56.123456Z */, "1990-03-03T12:34:56.123456789123Z"}, + {165846896123456L /* 1975-04-04T12:34:56.123456Z */, "1975-04-04T12:34:56.123456789123Z"} + }; + + public static ServiceAccountCredentials loadCredentials(String credentialFile) { + try (InputStream keyStream = new ByteArrayInputStream(credentialFile.getBytes())) { + return ServiceAccountCredentials.fromStream(keyStream); + } catch (IOException e) { + fail("Couldn't create fake JSON credentials."); + } + return null; + } + + public static class AppendCompleteCallback implements ApiFutureCallback { + private final Object lock = new Object(); + private int batchCount = 0; + + public void onSuccess(AppendRowsResponse response) { + synchronized (lock) { + if (response.hasError()) { + System.out.format("Error: %s\n", response.getError()); + } else { + ++batchCount; + System.out.format("Wrote batch %d\n", batchCount); + } + } + } + + public void onFailure(Throwable throwable) { + 
System.out.format("Error: %s\n", throwable.toString()); + } + } + + /** + * Reads all the rows from the specified table. + * + *

For every row, the consumer is called for processing. + * + * @param table + * @param snapshotInMillis Optional. If specified, all rows up to timestamp will be returned. + * @param filter Optional. If specified, it will be used to restrict returned data. + * @param consumer that receives all Avro rows. + * @throws IOException + */ + public static void processRowsAtSnapshot( + BigQueryReadClient client, + String parentProjectId, + String table, + Long snapshotInMillis, + String filter, + SimpleRowReaderAvro.AvroRowConsumer consumer) + throws IOException { + Preconditions.checkNotNull(table); + Preconditions.checkNotNull(consumer); + + CreateReadSessionRequest.Builder createSessionRequestBuilder = + CreateReadSessionRequest.newBuilder() + .setParent(parentProjectId) + .setMaxStreamCount(1) + .setReadSession( + ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .setReadOptions( + ReadSession.TableReadOptions.newBuilder() + .setAvroSerializationOptions( + AvroSerializationOptions.newBuilder() + .setPicosTimestampPrecision( + // This serialization option only impacts columns that are + // type. `TIMESTAMP_PICOS` and has no impact on other + // columns types. 
+ AvroSerializationOptions.PicosTimestampPrecision + .TIMESTAMP_PRECISION_PICOS) + .build()) + .build()) + .build()); + + if (snapshotInMillis != null) { + createSessionRequestBuilder + .getReadSessionBuilder() + .setTableModifiers( + ReadSession.TableModifiers.newBuilder() + .setSnapshotTime(Timestamps.fromMillis(snapshotInMillis)) + .build()); + } + + if (filter != null && !filter.isEmpty()) { + createSessionRequestBuilder + .getReadSessionBuilder() + .setReadOptions( + ReadSession.TableReadOptions.newBuilder().setRowRestriction(filter).build()); + } + + ReadSession session = client.createReadSession(createSessionRequestBuilder.build()); + assertEquals( + 1, + session.getStreamsCount(), + String.format( + "Did not receive expected number of streams for table '%s' CreateReadSession" + + " response:%n%s", + table, session.toString())); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); + + SimpleRowReaderAvro reader = + new SimpleRowReaderAvro(new Schema.Parser().parse(session.getAvroSchema().getSchema())); + + ServerStream stream = client.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + reader.processRows(response.getAvroRows(), consumer); + } + } + + /** + * Reads all the rows from the specified table and returns a list as generic Avro records. + * + * @param table + * @param filter Optional. If specified, it will be used to restrict returned data. + * @return + */ + public static List readAllRows( + BigQueryReadClient client, String parentProjectId, String table, String filter) + throws IOException { + final List rows = new ArrayList<>(); + processRowsAtSnapshot( + client, + parentProjectId, + /* table= */ table, + /* snapshotInMillis= */ null, + /* filter= */ filter, + (SimpleRowReaderAvro.AvroRowConsumer) + record -> { + // clone the record since that reference will be reused by the reader. 
+ rows.add(new GenericRecordBuilder(record).build()); + }); + return rows; + } +} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/SimpleRowReaderArrow.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderArrow.java similarity index 79% rename from google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/SimpleRowReaderArrow.java rename to google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderArrow.java index 685f72fbc9..ff5b423c2c 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/SimpleRowReaderArrow.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderArrow.java @@ -14,8 +14,10 @@ * limitations under the License. */ -package com.google.cloud.bigquery.storage.v1.it; +package com.google.cloud.bigquery.storage.v1.it.util; +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_COLUMN_NAME; +import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME; import static com.google.common.truth.Truth.assertThat; import com.google.cloud.bigquery.FieldElementType; @@ -23,7 +25,6 @@ import com.google.cloud.bigquery.storage.v1.ArrowRecordBatch; import com.google.cloud.bigquery.storage.v1.ArrowSchema; import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; import java.io.IOException; import java.time.LocalDateTime; import java.util.ArrayList; @@ -50,17 +51,44 @@ public interface ArrowBatchConsumer { void accept(VectorSchemaRoot root); } + public static class ArrowTimestampBatchConsumer implements ArrowBatchConsumer { + private final Object[][] expectedTimestampValues; + + public ArrowTimestampBatchConsumer(Object[][] expectedTimestampValues) { + this.expectedTimestampValues = 
expectedTimestampValues; + } + + @Override + public void accept(VectorSchemaRoot root) { + FieldVector timestampFieldVector = root.getVector(TIMESTAMP_COLUMN_NAME); + FieldVector timestampHigherPrecisionFieldVector = + root.getVector(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME); + assertThat(timestampFieldVector.getValueCount()) + .isEqualTo(timestampHigherPrecisionFieldVector.getValueCount()); + int count = timestampFieldVector.getValueCount(); + for (int i = 0; i < count; i++) { + long timestampMicros = (Long) timestampFieldVector.getObject(i); + assertThat(timestampMicros).isEqualTo(expectedTimestampValues[i][0]); + + // The Object comes back as `Text` which cannot be cast to String + // (use `toString()` instead) + String timestampHigherPrecisionISO = + timestampHigherPrecisionFieldVector.getObject(i).toString(); + assertThat(timestampHigherPrecisionISO).isEqualTo(expectedTimestampValues[i][1]); + } + } + } + /** ArrowRangeBatchConsumer accepts batch Arrow data and validate the range values. 
*/ public static class ArrowRangeBatchConsumer implements ArrowBatchConsumer { - - private final ImmutableMap expectedRangeDateValues; - private final ImmutableMap expectedRangeDatetimeValues; - private final ImmutableMap expectedRangeTimestampValues; + private final Map expectedRangeDateValues; + private final Map expectedRangeDatetimeValues; + private final Map expectedRangeTimestampValues; public ArrowRangeBatchConsumer( - ImmutableMap expectedRangeDateValues, - ImmutableMap expectedRangeDatetimeValues, - ImmutableMap expectedRangeTimestampValues) { + Map expectedRangeDateValues, + Map expectedRangeDatetimeValues, + Map expectedRangeTimestampValues) { this.expectedRangeDateValues = expectedRangeDateValues; this.expectedRangeDatetimeValues = expectedRangeDatetimeValues; this.expectedRangeTimestampValues = expectedRangeTimestampValues; diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/SimpleRowReaderAvro.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderAvro.java similarity index 97% rename from google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/SimpleRowReaderAvro.java rename to google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderAvro.java index a23179c8c8..4914e93f5b 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/SimpleRowReaderAvro.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderAvro.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package com.google.cloud.bigquery.storage.v1.it; +package com.google.cloud.bigquery.storage.v1.it.util; import com.google.cloud.bigquery.storage.v1.AvroRows; import com.google.common.base.Preconditions; diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/WriteRetryTestUtil.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/WriteRetryTestUtil.java similarity index 95% rename from google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/WriteRetryTestUtil.java rename to google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/WriteRetryTestUtil.java index e11e0707df..d5518f790f 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/WriteRetryTestUtil.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/WriteRetryTestUtil.java @@ -14,9 +14,11 @@ * limitations under the License. 
*/ -package com.google.cloud.bigquery.storage.v1.it; +package com.google.cloud.bigquery.storage.v1.it.util; -import static org.junit.Assert.assertFalse; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.fail; import com.google.api.core.ApiFuture; import com.google.api.gax.retrying.RetrySettings; @@ -43,7 +45,6 @@ import java.util.logging.Logger; import org.json.JSONArray; import org.json.JSONObject; -import org.junit.Assert; public class WriteRetryTestUtil { private static final Logger LOG = @@ -100,10 +101,10 @@ public static void runExclusiveRetryTest( for (int i = 0; i < requestCount; i++) { LOG.info("Waiting for request " + i); try { - Assert.assertEquals( + assertEquals( allResponses.get(i).get().getAppendResult().getOffset().getValue(), i * rowBatchSize); } catch (ExecutionException ex) { - Assert.fail("Unexpected error " + ex); + fail("Unexpected error " + ex); } } } @@ -151,7 +152,7 @@ private static void runDefaultRetryTestInternal( try { assertFalse(allResponses.get(i).get().hasError()); } catch (Exception ex) { - Assert.fail("Unexpected error " + ex); + fail("Unexpected error " + ex); } } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettingsTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettingsTest.java index ab6a8d1298..1befb2a949 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettingsTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettingsTest.java @@ -32,16 +32,13 @@ import com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse; import java.time.Duration; import java.util.Set; -import org.junit.Test; -import 
org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; -@RunWith(JUnit4.class) public class EnhancedBigQueryReadStubSettingsTest { @Test - public void testSettingsArePreserved() { + void testSettingsArePreserved() { String endpoint = "some.other.host:123"; CredentialsProvider credentialsProvider = Mockito.mock(CredentialsProvider.class); Duration watchdogInterval = Duration.ofSeconds(12); @@ -100,14 +97,14 @@ private void verifySettings( } @Test - public void testCreateReadSessionSettings() { + void testCreateReadSessionSettings() { UnaryCallSettings.Builder builder = EnhancedBigQueryReadStubSettings.newBuilder().createReadSessionSettings(); verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); } @Test - public void testReadRowsSettings() { + void testReadRowsSettings() { ServerStreamingCallSettings.Builder builder = EnhancedBigQueryReadStubSettings.newBuilder().readRowsSettings(); assertThat(builder.getRetryableCodes()).contains(Code.UNAVAILABLE); @@ -123,7 +120,7 @@ public void testReadRowsSettings() { } @Test - public void testSplitReadStreamSettings() { + void testSplitReadStreamSettings() { UnaryCallSettings.Builder builder = EnhancedBigQueryReadStubSettings.newBuilder().splitReadStreamSettings(); verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/ResourceHeaderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/ResourceHeaderTest.java index a68f6e3ae5..90f0c395f3 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/ResourceHeaderTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/ResourceHeaderTest.java @@ -28,16 +28,14 @@ import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; import 
com.google.cloud.bigquery.storage.v1.ReadSession; import com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest; +import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + public class ResourceHeaderTest { private static final String TEST_TABLE_REFERENCE = @@ -45,7 +43,7 @@ public class ResourceHeaderTest { private static final String TEST_STREAM_NAME = "streamName"; - private static final String NAME = "resource-header-test:123"; + private static final String NAME = "resource-header-test:123-v1"; private static final String HEADER_NAME = "x-goog-request-params"; @@ -68,14 +66,14 @@ public class ResourceHeaderTest { private LocalChannelProvider channelProvider; private BigQueryReadClient client; - @BeforeClass + @BeforeAll public static void setUpClass() throws Exception { server = new InProcessServer<>(new BigQueryReadImplBase() {}, NAME); server.start(); } - @Before - public void setUp() throws Exception { + @BeforeEach + void setUp() throws Exception { channelProvider = LocalChannelProvider.create(NAME); BigQueryReadSettings.Builder settingsBuilder = BigQueryReadSettings.newBuilder() @@ -85,19 +83,20 @@ public void setUp() throws Exception { client = BigQueryReadClient.create(settingsBuilder.build()); } - @After - public void tearDown() throws Exception { + @AfterEach + void tearDown() throws Exception { client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); } - @AfterClass - public static void tearDownClass() throws Exception { + @AfterAll + static void tearDownClass() throws Exception { server.stop(); 
server.blockUntilShutdown(); } @Test - public void createReadSessionTest() { + void createReadSessionTest() { try { client.createReadSession( "parents/project", ReadSession.newBuilder().setTable(TEST_TABLE_REFERENCE).build(), 1); @@ -108,7 +107,7 @@ public void createReadSessionTest() { } @Test - public void readRowsTest() { + void readRowsTest() { try { ReadRowsRequest request = ReadRowsRequest.newBuilder().setReadStream(TEST_STREAM_NAME).setOffset(125).build(); @@ -121,7 +120,7 @@ public void readRowsTest() { } @Test - public void splitReadStreamTest() { + void splitReadStreamTest() { try { client.splitReadStream(SplitReadStreamRequest.newBuilder().setName(TEST_STREAM_NAME).build()); } catch (UnimplementedException e) { diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/WriteHeaderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/WriteHeaderTest.java index cc009d9796..638414b5e2 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/WriteHeaderTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/WriteHeaderTest.java @@ -30,15 +30,12 @@ import com.google.cloud.bigquery.storage.v1.*; import com.google.cloud.bigquery.storage.v1.BigQueryWriteGrpc.BigQueryWriteImplBase; import java.util.regex.Pattern; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + public class WriteHeaderTest { private static final String TEST_TABLE_REFERENCE = @@ -61,14 +58,14 @@ public class WriteHeaderTest { private LocalChannelProvider 
channelProvider; private BigQueryWriteClient client; - @BeforeClass + @BeforeAll public static void setUpClass() throws Exception { server = new InProcessServer<>(new BigQueryWriteImplBase() {}, NAME); server.start(); } - @Before - public void setUp() throws Exception { + @BeforeEach + void setUp() throws Exception { channelProvider = LocalChannelProvider.create(NAME); BigQueryWriteSettings.Builder settingsBuilder = BigQueryWriteSettings.newBuilder() @@ -83,19 +80,19 @@ public void setUp() throws Exception { client = BigQueryWriteClient.create(settingsBuilder.build()); } - @After - public void tearDown() throws Exception { + @AfterEach + void tearDown() throws Exception { client.close(); } - @AfterClass - public static void tearDownClass() throws Exception { + @AfterAll + static void tearDownClass() throws Exception { server.stop(); server.blockUntilShutdown(); } @Test - public void createWriteStreamTest() { + void createWriteStreamTest() { CreateWriteStreamRequest request = CreateWriteStreamRequest.newBuilder() .setParent(TEST_TABLE_REFERENCE) @@ -110,7 +107,7 @@ public void createWriteStreamTest() { } @Test - public void writeRowsTest() { + void writeRowsTest() { BidiStreamingCallable callable = client.appendRowsCallable(); ApiCallContext apiCallContext = null; diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryTest.java index 2a2e513bec..711049d0c0 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryTest.java @@ -15,9 +15,12 @@ */ package com.google.cloud.bigquery.storage.v1.stub.readrows; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static 
org.junit.jupiter.api.Assertions.assertNotNull; + import com.google.api.gax.core.NoCredentialsProvider; -import com.google.api.gax.grpc.GrpcTransportChannel; -import com.google.api.gax.rpc.FixedTransportChannelProvider; +import com.google.api.gax.grpc.testing.InProcessServer; +import com.google.api.gax.grpc.testing.LocalChannelProvider; import com.google.api.gax.rpc.ServerStream; import com.google.cloud.bigquery.storage.v1.BigQueryReadClient; import com.google.cloud.bigquery.storage.v1.BigQueryReadGrpc.BigQueryReadImplBase; @@ -27,50 +30,45 @@ import com.google.common.collect.Queues; import io.grpc.Status.Code; import io.grpc.stub.StreamObserver; -import io.grpc.testing.GrpcServerRule; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Queue; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class ReadRowsRetryTest { +import java.util.UUID; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; - @Rule public GrpcServerRule serverRule = new GrpcServerRule(); +class ReadRowsRetryTest { private TestBigQueryStorageService service; private BigQueryReadClient client; + private InProcessServer server; - @Before - public void setUp() throws IOException { + @BeforeEach + void setUp() throws Exception { service = new TestBigQueryStorageService(); - serverRule.getServiceRegistry().addService(service); + String serverName = UUID.randomUUID().toString(); + server = new InProcessServer<>(service, serverName); + server.start(); BigQueryReadSettings settings = BigQueryReadSettings.newBuilder() .setCredentialsProvider(NoCredentialsProvider.create()) - .setTransportChannelProvider( - FixedTransportChannelProvider.create( - 
GrpcTransportChannel.create(serverRule.getChannel()))) + .setTransportChannelProvider(LocalChannelProvider.create(serverName)) .build(); client = BigQueryReadClient.create(settings); } - @After - public void tearDown() throws Exception { + @AfterEach + void tearDown() throws Exception { client.close(); + server.stop(); + server.blockUntilShutdown(); } @Test - public void happyPathTest() { + void happyPathTest() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); service.expectations.add( RpcExpectation.create() @@ -78,11 +76,11 @@ public void happyPathTest() { .respondWithNumberOfRows(10) .respondWithNumberOfRows(7)); - Assert.assertEquals(17, getRowCount(request)); + assertEquals(17, getRowCount(request)); } @Test - public void immediateRetryTest() { + void immediateRetryTest() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); service.expectations.add( RpcExpectation.create() @@ -95,11 +93,11 @@ public void immediateRetryTest() { .respondWithNumberOfRows(10) .respondWithNumberOfRows(7)); - Assert.assertEquals(17, getRowCount(request)); + assertEquals(17, getRowCount(request)); } @Test - public void multipleRetryTestWithZeroInitialOffset() { + void multipleRetryTestWithZeroInitialOffset() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); service.expectations.add( RpcExpectation.create() @@ -117,11 +115,11 @@ public void multipleRetryTestWithZeroInitialOffset() { service.expectations.add( RpcExpectation.create().expectRequest("fake-stream", 22).respondWithNumberOfRows(6)); - Assert.assertEquals(28, getRowCount(request)); + assertEquals(28, getRowCount(request)); } @Test - public void multipleRetryTestWithNonZeroInitialOffset() { + void multipleRetryTestWithNonZeroInitialOffset() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 17); service.expectations.add( RpcExpectation.create() @@ -139,11 +137,11 @@ public void multipleRetryTestWithNonZeroInitialOffset() { 
service.expectations.add( RpcExpectation.create().expectRequest("fake-stream", 39).respondWithNumberOfRows(3)); - Assert.assertEquals(25, getRowCount(request)); + assertEquals(25, getRowCount(request)); } @Test - public void errorAtTheVeryEndTest() { + void errorAtTheVeryEndTest() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); service.expectations.add( RpcExpectation.create() @@ -155,7 +153,7 @@ public void errorAtTheVeryEndTest() { service.expectations.add( RpcExpectation.create().expectRequest("fake-stream", 17).respondWithNumberOfRows(0)); - Assert.assertEquals(17, getRowCount(request)); + assertEquals(17, getRowCount(request)); } private int getRowCount(ReadRowsRequest request) { @@ -179,17 +177,15 @@ public void readRows( RpcExpectation expectedRpc = expectations.poll(); currentRequestIndex++; - Assert.assertNotNull( - "Unexpected request #" + currentRequestIndex + ": " + request.toString(), expectedRpc); - - Assert.assertEquals( + assertNotNull( + expectedRpc, "Unexpected request #" + currentRequestIndex + ": " + request.toString()); + assertEquals( + expectedRpc.expectedRequest, + request, "Expected request #" + currentRequestIndex + " does not match actual request: " - + request.toString(), - expectedRpc.expectedRequest, - request); - + + request.toString()); for (ReadRowsResponse response : expectedRpc.responses) { responseObserver.onNext(response); } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClientTest.java index 1488d84014..87547df18c 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClientTest.java @@ -1,5 +1,5 @@ /* - 
* Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionService.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionService.java index 9435f45ded..7881256590 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionService.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionService.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionServiceImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionServiceImpl.java index b2086c079d..d0e6ae8c8a 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionServiceImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionServiceImpl.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClientTest.java index d932d89d26..420fc9d5c8 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClientTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionService.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionService.java index 9e8c74228b..ce8b9667a5 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionService.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionService.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionServiceImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionServiceImpl.java index a7968167a9..c342ddfd11 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionServiceImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionServiceImpl.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java index 9a171fa23f..cc018cedda 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java index d12166157b..417cbed9fc 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java @@ -15,6 +15,10 @@ */ package com.google.cloud.bigquery.storage.v1beta1; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + import com.google.api.gax.core.NoCredentialsProvider; import com.google.api.gax.grpc.GaxGrpcProperties; import com.google.api.gax.grpc.GrpcStatusCode; @@ -54,14 +58,16 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.ExecutionException; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -public class BigQueryStorageClientTest { +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; + +@Execution(ExecutionMode.SAME_THREAD) +class BigQueryStorageClientTest { private static MockBigQueryStorage mockBigQueryStorage; private static MockServiceHelper serviceHelper; private BigQueryStorageClient client; @@ -69,21 +75,21 @@ public class BigQueryStorageClientTest { private int retryCount; private Code lastRetryStatusCode; - @BeforeClass - public static void startStaticServer() { + @BeforeAll + static void 
startStaticServer() { mockBigQueryStorage = new MockBigQueryStorage(); serviceHelper = new MockServiceHelper("in-process-1", Arrays.asList(mockBigQueryStorage)); serviceHelper.start(); } - @AfterClass - public static void stopServer() { + @AfterAll + static void stopServer() { serviceHelper.stop(); } - @Before - public void setUp() throws IOException { + @BeforeEach + void setUp() throws IOException { serviceHelper.reset(); channelProvider = serviceHelper.createChannelProvider(); retryCount = 0; @@ -106,14 +112,14 @@ public void onRetryAttempt(Status prevStatus, Metadata prevMetadata) { client = BigQueryStorageClient.create(settings); } - @After - public void tearDown() throws Exception { + @AfterEach + void tearDown() throws Exception { client.close(); } @Test @SuppressWarnings("all") - public void createReadSessionTest() { + void createReadSessionTest() { String name = "name3373707"; ReadSession expectedResponse = ReadSession.newBuilder().setName(name).build(); mockBigQueryStorage.addResponse(expectedResponse); @@ -123,16 +129,16 @@ public void createReadSessionTest() { int requestedStreams = 1017221410; ReadSession actualResponse = client.createReadSession(tableReference, parent, requestedStreams); - Assert.assertEquals(expectedResponse, actualResponse); + assertEquals(expectedResponse, actualResponse); List actualRequests = mockBigQueryStorage.getRequests(); - Assert.assertEquals(1, actualRequests.size()); + assertEquals(1, actualRequests.size()); CreateReadSessionRequest actualRequest = (CreateReadSessionRequest) actualRequests.get(0); - Assert.assertEquals(tableReference, actualRequest.getTableReference()); - Assert.assertEquals(parent, actualRequest.getParent()); - Assert.assertEquals(requestedStreams, actualRequest.getRequestedStreams()); - Assert.assertTrue( + assertEquals(tableReference, actualRequest.getTableReference()); + assertEquals(parent, actualRequest.getParent()); + assertEquals(requestedStreams, actualRequest.getRequestedStreams()); + 
assertTrue( channelProvider.isHeaderSent( ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), GaxGrpcProperties.getDefaultApiClientHeaderPattern())); @@ -140,25 +146,22 @@ public void createReadSessionTest() { @Test @SuppressWarnings("all") - public void createReadSessionExceptionTest() throws Exception { + void createReadSessionExceptionTest() throws Exception { StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); mockBigQueryStorage.addException(exception); - try { - TableReference tableReference = TableReference.newBuilder().build(); - String parent = "parent-995424086"; - int requestedStreams = 1017221410; + TableReference tableReference = TableReference.newBuilder().build(); + String parent = "parent-995424086"; + int requestedStreams = 1017221410; - client.createReadSession(tableReference, parent, requestedStreams); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception - } + assertThrows( + InvalidArgumentException.class, + () -> client.createReadSession(tableReference, parent, requestedStreams)); } @Test @SuppressWarnings("all") - public void readRowsTest() throws Exception { + void readRowsTest() throws Exception { ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().build(); mockBigQueryStorage.addResponse(expectedResponse); StreamPosition readPosition = StreamPosition.newBuilder().build(); @@ -170,16 +173,16 @@ public void readRowsTest() throws Exception { callable.serverStreamingCall(request, responseObserver); List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); - Assert.assertEquals(expectedResponse, actualResponses.get(0)); + assertEquals(1, actualResponses.size()); + assertEquals(expectedResponse, actualResponses.get(0)); - Assert.assertEquals(retryCount, 0); - Assert.assertEquals(lastRetryStatusCode, Code.OK); + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); } @Test 
@SuppressWarnings("all") - public void readRowsExceptionTest() throws Exception { + void readRowsExceptionTest() throws Exception { StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); mockBigQueryStorage.addException(exception); StreamPosition readPosition = StreamPosition.newBuilder().build(); @@ -190,22 +193,19 @@ public void readRowsExceptionTest() throws Exception { ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); - try { - List actualResponses = responseObserver.future().get(); - Assert.fail("No exception thrown"); - } catch (ExecutionException e) { - Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); - InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); - Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); - } - - Assert.assertEquals(retryCount, 0); - Assert.assertEquals(lastRetryStatusCode, Code.OK); + ExecutionException e = + assertThrows(ExecutionException.class, () -> responseObserver.future().get()); + assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); + assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); } @Test @SuppressWarnings("all") - public void batchCreateReadSessionStreamsTest() { + void batchCreateReadSessionStreamsTest() { BatchCreateReadSessionStreamsResponse expectedResponse = BatchCreateReadSessionStreamsResponse.newBuilder().build(); mockBigQueryStorage.addResponse(expectedResponse); @@ -215,16 +215,16 @@ public void batchCreateReadSessionStreamsTest() { BatchCreateReadSessionStreamsResponse actualResponse = client.batchCreateReadSessionStreams(session, requestedStreams); - Assert.assertEquals(expectedResponse, actualResponse); + 
assertEquals(expectedResponse, actualResponse); List actualRequests = mockBigQueryStorage.getRequests(); - Assert.assertEquals(1, actualRequests.size()); + assertEquals(1, actualRequests.size()); BatchCreateReadSessionStreamsRequest actualRequest = (BatchCreateReadSessionStreamsRequest) actualRequests.get(0); - Assert.assertEquals(session, actualRequest.getSession()); - Assert.assertEquals(requestedStreams, actualRequest.getRequestedStreams()); - Assert.assertTrue( + assertEquals(session, actualRequest.getSession()); + assertEquals(requestedStreams, actualRequest.getRequestedStreams()); + assertTrue( channelProvider.isHeaderSent( ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), GaxGrpcProperties.getDefaultApiClientHeaderPattern())); @@ -232,24 +232,21 @@ public void batchCreateReadSessionStreamsTest() { @Test @SuppressWarnings("all") - public void batchCreateReadSessionStreamsExceptionTest() throws Exception { + void batchCreateReadSessionStreamsExceptionTest() throws Exception { StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); mockBigQueryStorage.addException(exception); - try { - ReadSession session = ReadSession.newBuilder().build(); - int requestedStreams = 1017221410; + ReadSession session = ReadSession.newBuilder().build(); + int requestedStreams = 1017221410; - client.batchCreateReadSessionStreams(session, requestedStreams); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception - } + assertThrows( + InvalidArgumentException.class, + () -> client.batchCreateReadSessionStreams(session, requestedStreams)); } @Test @SuppressWarnings("all") - public void finalizeStreamTest() { + void finalizeStreamTest() { Empty expectedResponse = Empty.newBuilder().build(); mockBigQueryStorage.addResponse(expectedResponse); @@ -258,11 +255,11 @@ public void finalizeStreamTest() { client.finalizeStream(stream); List actualRequests = mockBigQueryStorage.getRequests(); - 
Assert.assertEquals(1, actualRequests.size()); + assertEquals(1, actualRequests.size()); FinalizeStreamRequest actualRequest = (FinalizeStreamRequest) actualRequests.get(0); - Assert.assertEquals(stream, actualRequest.getStream()); - Assert.assertTrue( + assertEquals(stream, actualRequest.getStream()); + assertTrue( channelProvider.isHeaderSent( ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), GaxGrpcProperties.getDefaultApiClientHeaderPattern())); @@ -270,37 +267,32 @@ public void finalizeStreamTest() { @Test @SuppressWarnings("all") - public void finalizeStreamExceptionTest() throws Exception { + void finalizeStreamExceptionTest() throws Exception { StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); mockBigQueryStorage.addException(exception); - try { - Stream stream = Stream.newBuilder().build(); + Stream stream = Stream.newBuilder().build(); - client.finalizeStream(stream); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception - } + assertThrows(InvalidArgumentException.class, () -> client.finalizeStream(stream)); } @Test @SuppressWarnings("all") - public void splitReadStreamTest() { + void splitReadStreamTest() { SplitReadStreamResponse expectedResponse = SplitReadStreamResponse.newBuilder().build(); mockBigQueryStorage.addResponse(expectedResponse); Stream originalStream = Stream.newBuilder().build(); SplitReadStreamResponse actualResponse = client.splitReadStream(originalStream); - Assert.assertEquals(expectedResponse, actualResponse); + assertEquals(expectedResponse, actualResponse); List actualRequests = mockBigQueryStorage.getRequests(); - Assert.assertEquals(1, actualRequests.size()); + assertEquals(1, actualRequests.size()); SplitReadStreamRequest actualRequest = (SplitReadStreamRequest) actualRequests.get(0); - Assert.assertEquals(originalStream, actualRequest.getOriginalStream()); - Assert.assertTrue( + assertEquals(originalStream, 
actualRequest.getOriginalStream()); + assertTrue( channelProvider.isHeaderSent( ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), GaxGrpcProperties.getDefaultApiClientHeaderPattern())); @@ -308,23 +300,18 @@ public void splitReadStreamTest() { @Test @SuppressWarnings("all") - public void splitReadStreamExceptionTest() throws Exception { + void splitReadStreamExceptionTest() throws Exception { StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); mockBigQueryStorage.addException(exception); - try { - Stream originalStream = Stream.newBuilder().build(); + Stream originalStream = Stream.newBuilder().build(); - client.splitReadStream(originalStream); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception - } + assertThrows(InvalidArgumentException.class, () -> client.splitReadStream(originalStream)); } @Test @SuppressWarnings("all") - public void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException { + void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException { ApiException exception = new InternalException( new StatusRuntimeException( @@ -343,15 +330,15 @@ public void readRowsRetryingEOSExceptionTest() throws ExecutionException, Interr ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); + assertEquals(1, actualResponses.size()); - Assert.assertEquals(retryCount, 1); - Assert.assertEquals(lastRetryStatusCode, Code.INTERNAL); + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.INTERNAL); } @Test @SuppressWarnings("all") - public void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException { + void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException { ApiException exception = 
new InternalException( new StatusRuntimeException( @@ -370,15 +357,15 @@ public void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, Inte ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); + assertEquals(1, actualResponses.size()); - Assert.assertEquals(retryCount, 1); - Assert.assertEquals(lastRetryStatusCode, Code.INTERNAL); + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.INTERNAL); } @Test @SuppressWarnings("all") - public void readRowsNoRetryForResourceExhaustedWithoutRetryInfo() + void readRowsNoRetryForResourceExhaustedWithoutRetryInfo() throws ExecutionException, InterruptedException { ApiException exception = new ResourceExhaustedException( @@ -397,23 +384,19 @@ public void readRowsNoRetryForResourceExhaustedWithoutRetryInfo() ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); - try { - List actualResponses = responseObserver.future().get(); - Assert.fail("No exception thrown"); - } catch (ExecutionException e) { - Assert.assertTrue(e.getCause() instanceof ResourceExhaustedException); - ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause(); - Assert.assertEquals( - StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode()); - } - - Assert.assertEquals(retryCount, 0); - Assert.assertEquals(lastRetryStatusCode, Code.OK); + ExecutionException e = + assertThrows(ExecutionException.class, () -> responseObserver.future().get()); + assertTrue(e.getCause() instanceof ResourceExhaustedException); + ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause(); + assertEquals(StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode()); + + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, 
Code.OK); } @Test @SuppressWarnings("all") - public void readRowsNoRetryForResourceExhaustedWithRetryInfo() + void readRowsNoRetryForResourceExhaustedWithRetryInfo() throws ExecutionException, InterruptedException { RetryInfo retryInfo = RetryInfo.newBuilder() @@ -459,9 +442,9 @@ public RetryInfo parseBytes(byte[] serialized) { ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); + assertEquals(1, actualResponses.size()); - Assert.assertEquals(retryCount, 1); - Assert.assertEquals(lastRetryStatusCode, Code.RESOURCE_EXHAUSTED); + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.RESOURCE_EXHAUSTED); } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java index 0757e8fb59..f33d9b5b85 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java index a03f78bd39..6ebe39bbfa 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageLongRunningTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageLongRunningTest.java index df4635effb..f9e7c03610 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageLongRunningTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageLongRunningTest.java @@ -16,7 +16,7 @@ package com.google.cloud.bigquery.storage.v1beta1.it; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import com.google.api.gax.rpc.ServerStream; import com.google.cloud.ServiceOptions; @@ -36,17 +36,18 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import java.util.logging.Logger; -import org.junit.AfterClass; -import org.junit.Assume; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assumptions; +import 
org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** * Integration tests for BigQuery Storage API which target long running sessions. These tests can be * enabled by setting the system property 'bigquery.storage.enable_long_running_tests' to true. */ -public class ITBigQueryStorageLongRunningTest { +class ITBigQueryStorageLongRunningTest { private static final Logger LOG = Logger.getLogger(ITBigQueryStorageLongRunningTest.class.getName()); @@ -63,9 +64,10 @@ public class ITBigQueryStorageLongRunningTest { private static BigQueryStorageClient client; private static String parentProjectId; - @BeforeClass - public static void beforeClass() throws IOException { - Assume.assumeTrue(LONG_TESTS_DISABLED_MESSAGE, Boolean.getBoolean(LONG_TESTS_ENABLED_PROPERTY)); + @BeforeAll + static void beforeAll() throws IOException { + Assumptions.assumeTrue( + Boolean.getBoolean(LONG_TESTS_ENABLED_PROPERTY), LONG_TESTS_DISABLED_MESSAGE); client = BigQueryStorageClient.create(); parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId()); @@ -75,15 +77,16 @@ public static void beforeClass() throws IOException { ITBigQueryStorageLongRunningTest.class.getSimpleName(), parentProjectId)); } - @AfterClass - public static void afterClass() { + @AfterAll + static void afterAll() throws InterruptedException { if (client != null) { client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); } } @Test - public void testLongRunningReadSession() throws InterruptedException, ExecutionException { + void testLongRunningReadSession() throws InterruptedException, ExecutionException { // This test reads a larger table with the goal of doing a simple validation of timeout settings // for a longer running session. 
@@ -100,26 +103,21 @@ public void testLongRunningReadSession() throws InterruptedException, ExecutionE /* parent= */ parentProjectId, /* requestedStreams= */ 5); assertEquals( + 5, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table reference '%s' CreateReadSession" + " response:%n%s", - TextFormat.printer().shortDebugString(tableReference), session.toString()), - 5, - session.getStreamsCount()); + TextFormat.printer().shortDebugString(tableReference), session.toString())); List> tasks = new ArrayList<>(session.getStreamsCount()); for (final Stream stream : session.getStreamsList()) { - tasks.add( - new Callable() { - @Override - public Long call() throws Exception { - return readAllRowsFromStream(stream); - } - }); + tasks.add(() -> readAllRowsFromStream(stream)); } ExecutorService executor = Executors.newFixedThreadPool(tasks.size()); List> results = executor.invokeAll(tasks); + executor.shutdown(); long rowCount = 0; for (Future result : results) { diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageTest.java index bc772f0119..04191a38a4 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageTest.java @@ -18,11 +18,12 @@ import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.Truth.assertWithMessage; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; import com.google.api.gax.core.FixedCredentialsProvider; import com.google.api.gax.rpc.ServerStream; @@ -78,6 +79,8 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.logging.Logger; import org.apache.avro.Conversions; import org.apache.avro.LogicalTypes; @@ -85,12 +88,12 @@ import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericRecordBuilder; import org.apache.avro.util.Utf8; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** Integration tests for BigQuery Storage API. 
*/ -public class ITBigQueryStorageTest { +class ITBigQueryStorageTest { private static final Logger LOG = Logger.getLogger(ITBigQueryStorageTest.class.getName()); private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); @@ -178,8 +181,8 @@ public class ITBigQueryStorageTest { + " \"universe_domain\": \"fake.domain\"\n" + "}"; - @BeforeClass - public static void beforeClass() throws IOException { + @BeforeAll + static void beforeAll() throws IOException { client = BigQueryStorageClient.create(); parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId()); @@ -196,10 +199,11 @@ public static void beforeClass() throws IOException { LOG.info("Created test dataset: " + DATASET); } - @AfterClass - public static void afterClass() { + @AfterAll + static void afterAll() throws InterruptedException { if (client != null) { client.close(); + client.awaitTermination(10, TimeUnit.SECONDS); } if (bigquery != null) { @@ -209,7 +213,7 @@ public static void afterClass() { } @Test - public void testSimpleRead() { + void testSimpleRead() { TableReference tableReference = TableReference.newBuilder() .setProjectId("bigquery-public-data") @@ -223,12 +227,12 @@ public void testSimpleRead() { /* parent= */ parentProjectId, /* requestedStreams= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table reference '%s' CreateReadSession" + " response:%n%s", - TextFormat.printer().shortDebugString(tableReference), session.toString()), - 1, - session.getStreamsCount()); + TextFormat.printer().shortDebugString(tableReference), session.toString())); StreamPosition readPosition = StreamPosition.newBuilder().setStream(session.getStreams(0)).build(); @@ -246,7 +250,7 @@ public void testSimpleRead() { } @Test - public void testSimpleReadArrow() { + void testSimpleReadArrow() { TableReference tableReference = TableReference.newBuilder() .setProjectId("bigquery-public-data") @@ -263,12 
+267,12 @@ public void testSimpleReadArrow() { .build(); ReadSession session = client.createReadSession(request); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table reference '%s' CreateReadSession" + " response:%n%s", - TextFormat.printer().shortDebugString(tableReference), session.toString()), - 1, - session.getStreamsCount()); + TextFormat.printer().shortDebugString(tableReference), session.toString())); StreamPosition readPosition = StreamPosition.newBuilder().setStream(session.getStreams(0)).build(); @@ -287,9 +291,9 @@ public void testSimpleReadArrow() { } @Test - public void testRangeType() throws InterruptedException { + void testRangeType() throws InterruptedException { // Create table with Range values. - String tableName = "test_range_type"; + String tableName = "test_range_type" + UUID.randomUUID().toString().substring(0, 8); QueryJobConfiguration createTable = QueryJobConfiguration.newBuilder( String.format( @@ -321,12 +325,12 @@ public void testRangeType() throws InterruptedException { .build(); ReadSession session = client.createReadSession(createReadSessionRequestrequest); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table reference '%s' CreateReadSession" + " response:%n%s", - TextFormat.printer().shortDebugString(tableReference), session.toString()), - 1, - session.getStreamsCount()); + TextFormat.printer().shortDebugString(tableReference), session.toString())); StreamPosition readPosition = StreamPosition.newBuilder().setStream(session.getStreams(0)).build(); @@ -345,7 +349,7 @@ public void testRangeType() throws InterruptedException { } @Test - public void testSimpleReadAndResume() { + void testSimpleReadAndResume() { TableReference tableReference = TableReference.newBuilder() .setProjectId("bigquery-public-data") @@ -359,12 +363,12 @@ public void testSimpleReadAndResume() { /* parent= */ parentProjectId, /* 
requestedStreams= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table reference '%s' CreateReadSession" + " response:%n%s", - TextFormat.printer().shortDebugString(tableReference), session.toString()), - 1, - session.getStreamsCount()); + TextFormat.printer().shortDebugString(tableReference), session.toString())); // We have to read some number of rows in order to be able to resume. More details: // https://cloud.google.com/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1beta1#google.cloud.bigquery.storage.v1beta1.ReadRowsRequest @@ -389,7 +393,7 @@ public void testSimpleReadAndResume() { } @Test - public void testFilter() throws IOException { + void testFilter() throws IOException { TableReference tableReference = TableReference.newBuilder() .setProjectId("bigquery-public-data") @@ -411,12 +415,12 @@ public void testFilter() throws IOException { ReadSession session = client.createReadSession(request); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table reference '%s' CreateReadSession" + " response:%n%s", - TextFormat.printer().shortDebugString(tableReference), session.toString()), - 1, - session.getStreamsCount()); + TextFormat.printer().shortDebugString(tableReference), session.toString())); StreamPosition readPosition = StreamPosition.newBuilder().setStream(session.getStreams(0)).build(); @@ -449,7 +453,7 @@ public void accept(GenericData.Record record) { } @Test - public void testColumnSelection() throws IOException { + void testColumnSelection() throws IOException { TableReference tableReference = TableReference.newBuilder() .setProjectId("bigquery-public-data") @@ -475,12 +479,12 @@ public void testColumnSelection() throws IOException { ReadSession session = client.createReadSession(request); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for 
table reference '%s' CreateReadSession" + " response:%n%s", - TextFormat.printer().shortDebugString(tableReference), session.toString()), - 1, - session.getStreamsCount()); + TextFormat.printer().shortDebugString(tableReference), session.toString())); StreamPosition readPosition = StreamPosition.newBuilder().setStream(session.getStreams(0)).build(); @@ -493,16 +497,18 @@ public void testColumnSelection() throws IOException { String actualSchemaMessage = String.format( "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); - assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType()); - assertEquals(actualSchemaMessage, "__root__", avroSchema.getName()); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); - assertEquals(actualSchemaMessage, 2, avroSchema.getFields().size()); + assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage); + assertEquals( + Schema.Type.STRING, avroSchema.getField("word").schema().getType(), actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.STRING, avroSchema.getField("word").schema().getType()); + Schema.Type.STRING, avroSchema.getField("word").schema().getType(), actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.LONG, - avroSchema.getField("word_count").schema().getType()); + avroSchema.getField("word_count").schema().getType(), + actualSchemaMessage); SimpleRowReader reader = new SimpleRowReader(avroSchema); @@ -531,7 +537,7 @@ public void accept(GenericData.Record record) { } @Test - public void testReadAtSnapshot() throws InterruptedException, IOException { + void testReadAtSnapshot() throws InterruptedException, IOException { Field intFieldSchema = Field.newBuilder("col", LegacySQLTypeName.INTEGER) .setMode(Mode.REQUIRED) @@ -587,8 +593,9 @@ public void accept(GenericData.Record record) { } @Test - public void 
testColumnPartitionedTableByDateField() throws InterruptedException, IOException { - String partitionedTableName = "test_column_partition_table_by_date"; + void testColumnPartitionedTableByDateField() throws InterruptedException, IOException { + String partitionedTableName = + "test_column_partition_table_by_date" + UUID.randomUUID().toString().substring(0, 8); String createTableStatement = String.format( " CREATE TABLE %s.%s (num_field INT64, date_field DATE) " @@ -615,19 +622,19 @@ public void testColumnPartitionedTableByDateField() throws InterruptedException, List unfilteredRows = ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null); - assertEquals("Actual rows read: " + unfilteredRows.toString(), 3, unfilteredRows.size()); + assertEquals(3, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString()); List partitionFilteredRows = ReadAllRows( /* tableReference= */ tableReference, /* filter= */ "date_field = CAST(\"2019-01-02\" AS DATE)"); assertEquals( - "Actual rows read: " + partitionFilteredRows.toString(), 1, partitionFilteredRows.size()); + 1, partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString()); assertEquals(2L, partitionFilteredRows.get(0).get("num_field")); } @Test - public void testIngestionTimePartitionedTable() throws InterruptedException, IOException { + void testIngestionTimePartitionedTable() throws InterruptedException, IOException { Field intFieldSchema = Field.newBuilder("num_field", LegacySQLTypeName.INTEGER) .setMode(Mode.REQUIRED) @@ -667,19 +674,19 @@ public void testIngestionTimePartitionedTable() throws InterruptedException, IOE List unfilteredRows = ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null); - assertEquals("Actual rows read: " + unfilteredRows.toString(), 2, unfilteredRows.size()); + assertEquals(2, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString()); List partitionFilteredRows = ReadAllRows( /* tableReference= */ 
tableReference, /* filter= */ "_PARTITIONDATE > \"2019-01-01\""); assertEquals( - "Actual rows read: " + partitionFilteredRows.toString(), 1, partitionFilteredRows.size()); + 1, partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString()); assertEquals(2L, partitionFilteredRows.get(0).get("num_field")); } @Test - public void testBasicSqlTypes() throws InterruptedException, IOException { - String table_name = "test_basic_sql_types"; + void testBasicSqlTypes() throws InterruptedException, IOException { + String tableName = "test_basic_sql_types" + UUID.randomUUID().toString().substring(0, 8); String createTableStatement = String.format( " CREATE TABLE %s.%s " @@ -700,20 +707,20 @@ public void testBasicSqlTypes() throws InterruptedException, IOException { + " TRUE," + " \"String field value\"," + " b\"абвгд\"", - DATASET, table_name); + DATASET, tableName); RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); TableReference tableReference = TableReference.newBuilder() - .setTableId(table_name) + .setTableId(tableName) .setDatasetId(DATASET) .setProjectId(ServiceOptions.getDefaultProjectId()) .build(); List rows = ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null); - assertEquals("Actual rows read: " + rows.toString(), 1, rows.size()); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); GenericData.Record record = rows.get(0); Schema avroSchema = record.getSchema(); @@ -723,22 +730,22 @@ public void testBasicSqlTypes() throws InterruptedException, IOException { "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString()); - assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType()); - assertEquals(actualSchemaMessage, "__root__", avroSchema.getName()); - assertEquals(actualSchemaMessage, 6, avroSchema.getFields().size()); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + assertEquals(6, avroSchema.getFields().size(), actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.LONG, avroSchema.getField("int_field").schema().getType()); - assertEquals(rowAssertMessage, 17L, (long) record.get("int_field")); + Schema.Type.LONG, avroSchema.getField("int_field").schema().getType(), actualSchemaMessage); + assertEquals(17L, (long) record.get("int_field"), rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.BYTES, - avroSchema.getField("num_field").schema().getType()); + avroSchema.getField("num_field").schema().getType(), + actualSchemaMessage); assertEquals( - actualSchemaMessage, LogicalTypes.decimal(/* precision= */ 38, /* scale= */ 9), - avroSchema.getField("num_field").schema().getLogicalType()); + avroSchema.getField("num_field").schema().getLogicalType(), + actualSchemaMessage); BigDecimal actual_num_field = new Conversions.DecimalConversion() .fromBytes( @@ -746,45 +753,46 @@ public void testBasicSqlTypes() throws InterruptedException, IOException { avroSchema, avroSchema.getField("num_field").schema().getLogicalType()); assertEquals( - rowAssertMessage, BigDecimal.valueOf(/* unscaledVal= */ 1_234_560_000_000L, /* scale= */ 9), - actual_num_field); + actual_num_field, + rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.DOUBLE, - avroSchema.getField("float_field").schema().getType()); + avroSchema.getField("float_field").schema().getType(), + actualSchemaMessage); 
assertEquals( - rowAssertMessage, /* expected= */ 6.547678d, /* actual= */ (double) record.get("float_field"), - /* delta= */ 0.0001); + /* delta= */ 0.0001, + rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.BOOLEAN, - avroSchema.getField("bool_field").schema().getType()); - assertEquals(rowAssertMessage, true, record.get("bool_field")); + avroSchema.getField("bool_field").schema().getType(), + actualSchemaMessage); + assertEquals(true, record.get("bool_field"), rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.STRING, - avroSchema.getField("str_field").schema().getType()); - assertEquals(rowAssertMessage, new Utf8("String field value"), record.get("str_field")); + avroSchema.getField("str_field").schema().getType(), + actualSchemaMessage); + assertEquals(new Utf8("String field value"), record.get("str_field"), rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.BYTES, - avroSchema.getField("bytes_field").schema().getType()); + avroSchema.getField("bytes_field").schema().getType(), + actualSchemaMessage); assertArrayEquals( - rowAssertMessage, Utf8.getBytesFor("абвгд"), - ((ByteBuffer) (record.get("bytes_field"))).array()); + ((ByteBuffer) (record.get("bytes_field"))).array(), + rowAssertMessage); } @Test - public void testDateAndTimeSqlTypes() throws InterruptedException, IOException { - String table_name = "test_date_and_time_sql_types"; + void testDateAndTimeSqlTypes() throws InterruptedException, IOException { + String tableName = + "test_date_and_time_sql_types" + UUID.randomUUID().toString().substring(0, 8); String createTableStatement = String.format( " CREATE TABLE %s.%s " @@ -801,20 +809,20 @@ public void testDateAndTimeSqlTypes() throws InterruptedException, IOException { + " CAST(\"2019-04-30 21:47:59.999999\" AS DATETIME)," + " CAST(\"21:47:59.999999\" AS TIME)," + " CAST(\"2019-04-30 19:24:19.123456 UTC\" AS TIMESTAMP)", - DATASET, table_name); + DATASET, tableName); 
RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); TableReference tableReference = TableReference.newBuilder() - .setTableId(table_name) + .setTableId(tableName) .setDatasetId(DATASET) .setProjectId(ServiceOptions.getDefaultProjectId()) .build(); List rows = ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null); - assertEquals("Actual rows read: " + rows.toString(), 1, rows.size()); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); GenericData.Record record = rows.get(0); Schema avroSchema = record.getSchema(); @@ -824,56 +832,56 @@ public void testDateAndTimeSqlTypes() throws InterruptedException, IOException { "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString()); - assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType()); - assertEquals(actualSchemaMessage, "__root__", avroSchema.getName()); - assertEquals(actualSchemaMessage, 4, avroSchema.getFields().size()); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + assertEquals(4, avroSchema.getFields().size(), actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.INT, avroSchema.getField("date_field").schema().getType()); + Schema.Type.INT, avroSchema.getField("date_field").schema().getType(), actualSchemaMessage); assertEquals( - actualSchemaMessage, LogicalTypes.date(), - avroSchema.getField("date_field").schema().getLogicalType()); + avroSchema.getField("date_field").schema().getLogicalType(), + actualSchemaMessage); assertEquals( - rowAssertMessage, LocalDate.of(/* year= */ 2019, /* month= */ 5, /* dayOfMonth= */ 31), - LocalDate.ofEpochDay((int) record.get("date_field"))); + LocalDate.ofEpochDay((int) record.get("date_field")), + rowAssertMessage); assertEquals( - 
actualSchemaMessage, Schema.Type.STRING, - avroSchema.getField("datetime_field").schema().getType()); + avroSchema.getField("datetime_field").schema().getType(), + actualSchemaMessage); assertEquals( - actualSchemaMessage, "datetime", - avroSchema.getField("datetime_field").schema().getObjectProp("logicalType")); + avroSchema.getField("datetime_field").schema().getObjectProp("logicalType"), + actualSchemaMessage); assertEquals( - rowAssertMessage, new Utf8("2019-04-30T21:47:59.999999"), - (Utf8) record.get("datetime_field")); + (Utf8) record.get("datetime_field"), + rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.LONG, - avroSchema.getField("time_field").schema().getType()); + avroSchema.getField("time_field").schema().getType(), + actualSchemaMessage); assertEquals( - actualSchemaMessage, LogicalTypes.timeMicros(), - avroSchema.getField("time_field").schema().getLogicalType()); + avroSchema.getField("time_field").schema().getLogicalType(), + actualSchemaMessage); assertEquals( - rowAssertMessage, LocalTime.of( /* hour= */ 21, /* minute= */ 47, /* second= */ 59, /* nanoOfSecond= */ 999_999_000), - LocalTime.ofNanoOfDay(1_000L * (long) record.get("time_field"))); + LocalTime.ofNanoOfDay(1_000L * (long) record.get("time_field")), + rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.LONG, - avroSchema.getField("timestamp_field").schema().getType()); + avroSchema.getField("timestamp_field").schema().getType(), + actualSchemaMessage); assertEquals( - actualSchemaMessage, LogicalTypes.timestampMicros(), - avroSchema.getField("timestamp_field").schema().getLogicalType()); + avroSchema.getField("timestamp_field").schema().getLogicalType(), + actualSchemaMessage); ZonedDateTime expected_timestamp = ZonedDateTime.parse( "2019-04-30T19:24:19Z", DateTimeFormatter.ISO_INSTANT.withZone(ZoneOffset.UTC)) @@ -885,12 +893,12 @@ public void testDateAndTimeSqlTypes() throws InterruptedException, IOException { /* epochSecond= */ 
actual_timestamp_micros / 1_000_000, (actual_timestamp_micros % 1_000_000) * 1_000), ZoneOffset.UTC); - assertEquals(rowAssertMessage, expected_timestamp, actual_timestamp); + assertEquals(expected_timestamp, actual_timestamp, rowAssertMessage); } @Test - public void testGeographySqlType() throws InterruptedException, IOException { - String table_name = "test_geography_sql_type"; + void testGeographySqlType() throws InterruptedException, IOException { + String tableName = "test_geography_sql_type" + UUID.randomUUID().toString().substring(0, 8); String createTableStatement = String.format( " CREATE TABLE %s.%s " @@ -900,20 +908,20 @@ public void testGeographySqlType() throws InterruptedException, IOException { + " ) " + "AS " + " SELECT ST_GEOGPOINT(1.1, 2.2)", - DATASET, table_name); + DATASET, tableName); RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); TableReference tableReference = TableReference.newBuilder() - .setTableId(table_name) + .setTableId(tableName) .setDatasetId(DATASET) .setProjectId(ServiceOptions.getDefaultProjectId()) .build(); List rows = ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null); - assertEquals("Actual rows read: " + rows.toString(), 1, rows.size()); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); GenericData.Record record = rows.get(0); Schema avroSchema = record.getSchema(); @@ -923,44 +931,45 @@ public void testGeographySqlType() throws InterruptedException, IOException { "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString()); - assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType()); - assertEquals(actualSchemaMessage, "__root__", avroSchema.getName()); - assertEquals(actualSchemaMessage, 1, avroSchema.getFields().size()); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + assertEquals(1, avroSchema.getFields().size(), actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.STRING, - avroSchema.getField("geo_field").schema().getType()); + avroSchema.getField("geo_field").schema().getType(), + actualSchemaMessage); assertEquals( - actualSchemaMessage, "GEOGRAPHY", - avroSchema.getField("geo_field").schema().getObjectProp("sqlType")); - assertEquals(rowAssertMessage, new Utf8("POINT(1.1 2.2)"), (Utf8) record.get("geo_field")); + avroSchema.getField("geo_field").schema().getObjectProp("sqlType"), + actualSchemaMessage); + assertEquals(new Utf8("POINT(1.1 2.2)"), (Utf8) record.get("geo_field"), rowAssertMessage); } @Test - public void testStructAndArraySqlTypes() throws InterruptedException, IOException { - String table_name = "test_struct_and_array_sql_types"; + void testStructAndArraySqlTypes() throws InterruptedException, IOException { + String tableName = + "test_struct_and_array_sql_types" + UUID.randomUUID().toString().substring(0, 8); String createTableStatement = String.format( " CREATE TABLE %s.%s (array_field ARRAY, struct_field STRUCT NOT NULL) OPTIONS( description=\"a" + " table with array and time column types\" ) AS SELECT [1, 2, 3], " + " (10, 'abc')", - DATASET, table_name); + DATASET, tableName); RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build()); TableReference tableReference = TableReference.newBuilder() - .setTableId(table_name) + 
.setTableId(tableName) .setDatasetId(DATASET) .setProjectId(ServiceOptions.getDefaultProjectId()) .build(); List rows = ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null); - assertEquals("Actual rows read: " + rows.toString(), 1, rows.size()); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); GenericData.Record record = rows.get(0); Schema avroSchema = record.getSchema(); @@ -970,48 +979,47 @@ public void testStructAndArraySqlTypes() throws InterruptedException, IOExceptio "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString()); - assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType()); - assertEquals(actualSchemaMessage, "__root__", avroSchema.getName()); - assertEquals(actualSchemaMessage, 2, avroSchema.getFields().size()); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.ARRAY, - avroSchema.getField("array_field").schema().getType()); + avroSchema.getField("array_field").schema().getType(), + actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.LONG, - avroSchema.getField("array_field").schema().getElementType().getType()); + avroSchema.getField("array_field").schema().getElementType().getType(), + actualSchemaMessage); assertArrayEquals( - rowAssertMessage, new Long[] {1L, 2L, 3L}, - ((GenericData.Array) record.get("array_field")).toArray(new Long[0])); + ((GenericData.Array) record.get("array_field")).toArray(new Long[0]), + rowAssertMessage); // Validate the STRUCT field and its members. 
Schema structSchema = avroSchema.getField("struct_field").schema(); - assertEquals(actualSchemaMessage, Schema.Type.RECORD, structSchema.getType()); + assertEquals(Schema.Type.RECORD, structSchema.getType(), actualSchemaMessage); GenericData.Record structRecord = (GenericData.Record) record.get("struct_field"); assertEquals( - actualSchemaMessage, Schema.Type.LONG, - structSchema.getField("int_field").schema().getType()); - assertEquals(rowAssertMessage, 10L, (long) structRecord.get("int_field")); + structSchema.getField("int_field").schema().getType(), + actualSchemaMessage); + assertEquals(10L, (long) structRecord.get("int_field"), rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.STRING, - structSchema.getField("str_field").schema().getType()); - assertEquals(rowAssertMessage, new Utf8("abc"), structRecord.get("str_field")); + structSchema.getField("str_field").schema().getType(), + actualSchemaMessage); + assertEquals(new Utf8("abc"), structRecord.get("str_field"), rowAssertMessage); } @Test - public void testUniverseDomainWithInvalidUniverseDomain() throws IOException { + void testUniverseDomainWithInvalidUniverseDomain() throws IOException { BigQueryStorageSettings bigQueryStorageSettings = BigQueryStorageSettings.newBuilder() .setCredentialsProvider( - FixedCredentialsProvider.create( - loadCredentials(FAKE_JSON_CRED_WITH_INVALID_DOMAIN))) + FixedCredentialsProvider.create(loadCredentials(FAKE_JSON_CRED_WITH_GOOGLE_DOMAIN))) .setUniverseDomain("invalid.domain") .build(); @@ -1024,23 +1032,23 @@ public void testUniverseDomainWithInvalidUniverseDomain() throws IOException { .setTableId("shakespeare") .build(); - try { - localClient.createReadSession( - /* tableReference= */ tableReference, - /* parent= */ parentProjectId, - /* requestedStreams= */ 1); - fail("RPCs to invalid universe domain should fail"); - } catch (UnauthenticatedException e) { - assertThat( - (e.getMessage() - .contains("does not match the universe domain found in the 
credentials"))) - .isTrue(); - } + UnauthenticatedException e = + assertThrows( + UnauthenticatedException.class, + () -> + localClient.createReadSession( + /* tableReference= */ tableReference, + /* parent= */ parentProjectId, + /* requestedStreams= */ 1)); + assertThat( + (e.getMessage() + .contains("does not match the universe domain found in the credentials"))) + .isTrue(); localClient.close(); } @Test - public void testInvalidUniverseDomainWithMismatchCredentials() throws IOException { + void testInvalidUniverseDomainWithMismatchCredentials() throws IOException { BigQueryStorageSettings bigQueryStorageSettings = BigQueryStorageSettings.newBuilder() .setCredentialsProvider( @@ -1058,23 +1066,23 @@ public void testInvalidUniverseDomainWithMismatchCredentials() throws IOExceptio .setTableId("shakespeare") .build(); - try { - localClient.createReadSession( - /* tableReference= */ tableReference, - /* parent= */ parentProjectId, - /* requestedStreams= */ 1); - fail("RPCs to invalid universe domain should fail"); - } catch (UnauthenticatedException e) { - assertThat( - (e.getMessage() - .contains("does not match the universe domain found in the credentials"))) - .isTrue(); - } + UnauthenticatedException e = + assertThrows( + UnauthenticatedException.class, + () -> + localClient.createReadSession( + /* tableReference= */ tableReference, + /* parent= */ parentProjectId, + /* requestedStreams= */ 1)); + assertThat( + (e.getMessage() + .contains("does not match the universe domain found in the credentials"))) + .isTrue(); localClient.close(); } @Test - public void testUniverseDomainWithMatchingDomain() throws IOException { + void testUniverseDomainWithMatchingDomain() throws IOException { // Test a valid domain using the default credentials and Google default universe domain. 
BigQueryStorageSettings bigQueryStorageSettings = BigQueryStorageSettings.newBuilder().setUniverseDomain("googleapis.com").build(); @@ -1094,12 +1102,12 @@ public void testUniverseDomainWithMatchingDomain() throws IOException { /* requestedStreams= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table reference '%s' CreateReadSession" + " response:%n%s", - TextFormat.printer().shortDebugString(tableReference), session.toString()), - 1, - session.getStreamsCount()); + TextFormat.printer().shortDebugString(tableReference), session.toString())); StreamPosition readPosition = StreamPosition.newBuilder().setStream(session.getStreams(0)).build(); @@ -1117,10 +1125,10 @@ public void testUniverseDomainWithMatchingDomain() throws IOException { localClient.close(); } - public void testUniverseDomain() throws IOException { + void testUniverseDomain() throws IOException { // This test is not yet part presubmit integration test as it requires the apis-tpclp.goog // universe domain credentials. - // Test a valid read session in the universe domain gdutst. + // Test a valid domain using the default credentials and Google default universe domain. 
BigQueryStorageSettings bigQueryStorageSettings = BigQueryStorageSettings.newBuilder().setUniverseDomain("apis-tpclp.goog").build(); BigQueryStorageClient localClient = BigQueryStorageClient.create(bigQueryStorageSettings); @@ -1224,12 +1232,12 @@ private void ProcessRowsAtSnapshot( ReadSession session = client.createReadSession(createSessionRequestBuilder.build()); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table reference '%s' CreateReadSession" + " response:%n%s", - TextFormat.printer().shortDebugString(tableReference), session.toString()), - 1, - session.getStreamsCount()); + TextFormat.printer().shortDebugString(tableReference), session.toString())); StreamPosition readPosition = StreamPosition.newBuilder().setStream(session.getStreams(0)).build(); @@ -1308,16 +1316,15 @@ private Job RunQueryJobAndExpectSuccess(QueryJobConfiguration configuration) assertNotNull(completedJob); assertNull( + /* object= */ completedJob.getStatus().getError(), /* message= */ "Received a job status that is not a success: " - + completedJob.getStatus().toString(), - /* object= */ completedJob.getStatus().getError()); + + completedJob.getStatus().toString()); return completedJob; } static ServiceAccountCredentials loadCredentials(String credentialFile) { - try { - InputStream keyStream = new ByteArrayInputStream(credentialFile.getBytes()); + try (InputStream keyStream = new ByteArrayInputStream(credentialFile.getBytes())) { return ServiceAccountCredentials.fromStream(keyStream); } catch (IOException e) { fail("Couldn't create fake JSON credentials."); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java index 2520933863..5b685fddcd 100644 --- 
a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java @@ -62,7 +62,7 @@ public SimpleRowReader(Schema schema) { * @param avroRows object returned from the ReadRowsResponse. * @param rowConsumer consumer that accepts GenericRecord. */ - public void processRows(AvroRows avroRows, AvroRowConsumer rowConsumer) throws IOException { + void processRows(AvroRows avroRows, AvroRowConsumer rowConsumer) throws IOException { Preconditions.checkNotNull(avroRows); Preconditions.checkNotNull(rowConsumer); decoder = diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java index a81a1da255..ac42094776 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java @@ -36,16 +36,13 @@ import com.google.protobuf.Empty; import java.time.Duration; import java.util.Set; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; -@RunWith(JUnit4.class) public class EnhancedBigQueryStorageStubSettingsTest { @Test - public void testSettingsArePreserved() { + void testSettingsArePreserved() { String endpoint = "some.other.host:123"; CredentialsProvider credentialsProvider = Mockito.mock(CredentialsProvider.class); Duration watchdogInterval = Duration.ofSeconds(12); @@ -104,14 +101,14 @@ private void verifySettings( } @Test - public void testCreateReadSessionSettings() { + void 
testCreateReadSessionSettings() { UnaryCallSettings.Builder builder = EnhancedBigQueryStorageStubSettings.newBuilder().createReadSessionSettings(); verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); } @Test - public void testReadRowsSettings() { + void testReadRowsSettings() { ServerStreamingCallSettings.Builder builder = EnhancedBigQueryStorageStubSettings.newBuilder().readRowsSettings(); assertThat(builder.getRetryableCodes()).contains(Code.UNAVAILABLE); @@ -127,7 +124,7 @@ public void testReadRowsSettings() { } @Test - public void testBatchCreateReadSessionStreamsSettings() { + void testBatchCreateReadSessionStreamsSettings() { UnaryCallSettings.Builder< BatchCreateReadSessionStreamsRequest, BatchCreateReadSessionStreamsResponse> builder = @@ -137,14 +134,14 @@ public void testBatchCreateReadSessionStreamsSettings() { } @Test - public void testFinalizeStreamSettings() { + void testFinalizeStreamSettings() { UnaryCallSettings.Builder builder = EnhancedBigQueryStorageStubSettings.newBuilder().finalizeStreamSettings(); verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); } @Test - public void testSplitReadStreamSettings() { + void testSplitReadStreamSettings() { UnaryCallSettings.Builder builder = EnhancedBigQueryStorageStubSettings.newBuilder().splitReadStreamSettings(); verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java index c2e2df1ebb..b319042b59 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java @@ -31,15 +31,12 @@ import 
com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition; import com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference; import java.util.regex.Pattern; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + public class ResourceHeaderTest { private static final TableReference TEST_TABLE_REFERENCE = @@ -54,7 +51,7 @@ public class ResourceHeaderTest { private static final Stream TEST_STREAM = Stream.newBuilder().setName("streamName").build(); - private static final String NAME = "resource-header-test:123"; + private static final String NAME = "resource-header-test:123-v1beta1"; private static final String HEADER_NAME = "x-goog-request-params"; @@ -80,14 +77,14 @@ public class ResourceHeaderTest { private LocalChannelProvider channelProvider; private BigQueryStorageClient client; - @BeforeClass + @BeforeAll public static void setUpClass() throws Exception { server = new InProcessServer<>(new BigQueryStorageImplBase() {}, NAME); server.start(); } - @Before - public void setUp() throws Exception { + @BeforeEach + void setUp() throws Exception { channelProvider = LocalChannelProvider.create(NAME); BigQueryStorageSettings.Builder settingsBuilder = BigQueryStorageSettings.newBuilder() @@ -97,19 +94,19 @@ public void setUp() throws Exception { client = BigQueryStorageClient.create(settingsBuilder.build()); } - @After - public void tearDown() throws Exception { + @AfterEach + void tearDown() throws Exception { client.close(); } - @AfterClass - public static void tearDownClass() throws Exception { + @AfterAll + static void tearDownClass() throws Exception { server.stop(); 
server.blockUntilShutdown(); } @Test - public void createReadSessionTest() { + void createReadSessionTest() { try { client.createReadSession(TEST_TABLE_REFERENCE, "parents/project", 1); } catch (UnimplementedException e) { @@ -119,7 +116,7 @@ public void createReadSessionTest() { } @Test - public void readRowsTest() { + void readRowsTest() { try { ReadRowsRequest request = ReadRowsRequest.newBuilder() @@ -134,7 +131,7 @@ public void readRowsTest() { } @Test - public void batchCreateReadStreamsForSessionTest() { + void batchCreateReadStreamsForSessionTest() { try { client.batchCreateReadSessionStreams(TEST_SESSION, 1); } catch (UnimplementedException e) { @@ -145,7 +142,7 @@ public void batchCreateReadStreamsForSessionTest() { } @Test - public void finalizeStreamTest() { + void finalizeStreamTest() { try { client.finalizeStream(TEST_STREAM); } catch (UnimplementedException e) { @@ -156,7 +153,7 @@ public void finalizeStreamTest() { } @Test - public void splitReadStreamTest() { + void splitReadStreamTest() { try { client.splitReadStream(TEST_STREAM); } catch (UnimplementedException e) { diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java index 714e30b390..5f734ae746 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java @@ -15,9 +15,12 @@ */ package com.google.cloud.bigquery.storage.v1beta1.stub.readrows; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + import com.google.api.gax.core.NoCredentialsProvider; -import com.google.api.gax.grpc.GrpcTransportChannel; -import 
com.google.api.gax.rpc.FixedTransportChannelProvider; +import com.google.api.gax.grpc.testing.InProcessServer; +import com.google.api.gax.grpc.testing.LocalChannelProvider; import com.google.api.gax.rpc.ServerStream; import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient; import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageGrpc.BigQueryStorageImplBase; @@ -29,52 +32,49 @@ import com.google.common.collect.Queues; import io.grpc.Status.Code; import io.grpc.stub.StreamObserver; -import io.grpc.testing.GrpcServerRule; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Queue; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import java.util.UUID; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; -@RunWith(MockitoJUnitRunner.class) -public class ReadRowsRetryTest { +class ReadRowsRetryTest { private static final Stream DEFAULT_STREAM = Stream.newBuilder().setName("defaultStream").build(); - @Rule public GrpcServerRule serverRule = new GrpcServerRule(); - private TestBigQueryStorageService service; private BigQueryStorageClient client; + private InProcessServer server; + private LocalChannelProvider channelProvider; - @Before - public void setUp() throws IOException { + @BeforeEach + void setUp() throws Exception { service = new TestBigQueryStorageService(); - serverRule.getServiceRegistry().addService(service); + String serverName = UUID.randomUUID().toString(); + server = new InProcessServer<>(service, serverName); + server.start(); + channelProvider = LocalChannelProvider.create(serverName); BigQueryStorageSettings settings = BigQueryStorageSettings.newBuilder() .setCredentialsProvider(NoCredentialsProvider.create()) - .setTransportChannelProvider( - 
FixedTransportChannelProvider.create( - GrpcTransportChannel.create(serverRule.getChannel()))) + .setTransportChannelProvider(channelProvider) .build(); client = BigQueryStorageClient.create(settings); } - @After - public void tearDown() throws Exception { + @AfterEach + void tearDown() throws Exception { client.close(); + server.stop(); + server.blockUntilShutdown(); } @Test - public void happyPathTest() { + void happyPathTest() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); service.expectations.add( RpcExpectation.create() @@ -82,11 +82,11 @@ public void happyPathTest() { .respondWithNumberOfRows(10) .respondWithNumberOfRows(7)); - Assert.assertEquals(17, getRowCount(request)); + assertEquals(17, getRowCount(request)); } @Test - public void immediateRetryTest() { + void immediateRetryTest() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); service.expectations.add( RpcExpectation.create() @@ -99,11 +99,11 @@ public void immediateRetryTest() { .respondWithNumberOfRows(10) .respondWithNumberOfRows(7)); - Assert.assertEquals(17, getRowCount(request)); + assertEquals(17, getRowCount(request)); } @Test - public void multipleRetryTestWithZeroInitialOffset() { + void multipleRetryTestWithZeroInitialOffset() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); service.expectations.add( RpcExpectation.create() @@ -121,11 +121,11 @@ public void multipleRetryTestWithZeroInitialOffset() { service.expectations.add( RpcExpectation.create().expectRequest("fake-stream", 22).respondWithNumberOfRows(6)); - Assert.assertEquals(28, getRowCount(request)); + assertEquals(28, getRowCount(request)); } @Test - public void multipleRetryTestWithNonZeroInitialOffset() { + void multipleRetryTestWithNonZeroInitialOffset() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 17); service.expectations.add( RpcExpectation.create() @@ -143,11 +143,11 @@ public void 
multipleRetryTestWithNonZeroInitialOffset() { service.expectations.add( RpcExpectation.create().expectRequest("fake-stream", 39).respondWithNumberOfRows(3)); - Assert.assertEquals(25, getRowCount(request)); + assertEquals(25, getRowCount(request)); } @Test - public void errorAtTheVeryEndTest() { + void errorAtTheVeryEndTest() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); service.expectations.add( RpcExpectation.create() @@ -159,7 +159,7 @@ public void errorAtTheVeryEndTest() { service.expectations.add( RpcExpectation.create().expectRequest("fake-stream", 17).respondWithNumberOfRows(0)); - Assert.assertEquals(17, getRowCount(request)); + assertEquals(17, getRowCount(request)); } private int getRowCount(ReadRowsRequest request) { @@ -183,17 +183,15 @@ public void readRows( RpcExpectation expectedRpc = expectations.poll(); currentRequestIndex++; - Assert.assertNotNull( - "Unexpected request #" + currentRequestIndex + ": " + request.toString(), expectedRpc); - - Assert.assertEquals( + assertNotNull( + expectedRpc, "Unexpected request #" + currentRequestIndex + ": " + request.toString()); + assertEquals( + expectedRpc.expectedRequest, + request, "Expected request #" + currentRequestIndex + " does not match actual request: " - + request.toString(), - expectedRpc.expectedRequest, - request); - + + request.toString()); for (ReadRowsResponse response : expectedRpc.responses) { responseObserver.onNext(response); } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BQTableSchemaToProtoDescriptorTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BQTableSchemaToProtoDescriptorTest.java deleted file mode 100644 index 8e08418237..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BQTableSchemaToProtoDescriptorTest.java +++ /dev/null @@ -1,527 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under 
the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1beta2; - -import static org.junit.Assert.*; - -import com.google.cloud.bigquery.storage.test.JsonTest.*; -import com.google.cloud.bigquery.storage.test.SchemaTest.*; -import com.google.common.collect.ImmutableMap; -import com.google.protobuf.Descriptors.Descriptor; -import com.google.protobuf.Descriptors.FieldDescriptor; -import java.util.HashMap; -import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -public class BQTableSchemaToProtoDescriptorTest { - // This is a map between the TableFieldSchema.Type and the descriptor it is supposed to - // produce. The produced descriptor will be used to check against the entry values here. 
- private static ImmutableMap - BQTableTypeToCorrectProtoDescriptorTest = - new ImmutableMap.Builder() - .put(TableFieldSchema.Type.BOOL, BoolType.getDescriptor()) - .put(TableFieldSchema.Type.BYTES, BytesType.getDescriptor()) - .put(TableFieldSchema.Type.DATE, Int32Type.getDescriptor()) - .put(TableFieldSchema.Type.DATETIME, Int64Type.getDescriptor()) - .put(TableFieldSchema.Type.DOUBLE, DoubleType.getDescriptor()) - .put(TableFieldSchema.Type.GEOGRAPHY, StringType.getDescriptor()) - .put(TableFieldSchema.Type.INT64, Int64Type.getDescriptor()) - .put(TableFieldSchema.Type.NUMERIC, BytesType.getDescriptor()) - .put(TableFieldSchema.Type.STRING, StringType.getDescriptor()) - .put(TableFieldSchema.Type.TIME, Int64Type.getDescriptor()) - .put(TableFieldSchema.Type.TIMESTAMP, Int64Type.getDescriptor()) - .build(); - - // Creates mapping from descriptor to how many times it was reused. - private void mapDescriptorToCount(Descriptor descriptor, HashMap map) { - for (FieldDescriptor field : descriptor.getFields()) { - if (field.getType() == FieldDescriptor.Type.MESSAGE) { - Descriptor subDescriptor = field.getMessageType(); - String messageName = subDescriptor.getName(); - if (map.containsKey(messageName)) { - map.put(messageName, map.get(messageName) + 1); - } else { - map.put(messageName, 1); - } - mapDescriptorToCount(subDescriptor, map); - } - } - } - - private void isDescriptorEqual(Descriptor convertedProto, Descriptor originalProto) { - // Check same number of fields - assertEquals(convertedProto.getFields().size(), originalProto.getFields().size()); - for (FieldDescriptor convertedField : convertedProto.getFields()) { - // Check field name - FieldDescriptor originalField = originalProto.findFieldByName(convertedField.getName()); - assertNotNull(originalField); - // Check type - FieldDescriptor.Type convertedType = convertedField.getType(); - FieldDescriptor.Type originalType = originalField.getType(); - assertEquals(convertedField.getName(), convertedType, 
originalType); - // Check mode - assertTrue( - (originalField.isRepeated() == convertedField.isRepeated()) - && (originalField.isRequired() == convertedField.isRequired()) - && (originalField.isOptional() == convertedField.isOptional())); - // Recursively check nested messages - if (convertedType == FieldDescriptor.Type.MESSAGE) { - isDescriptorEqual(convertedField.getMessageType(), originalField.getMessageType()); - } - } - } - - @Test - public void testSimpleTypes() throws Exception { - for (Map.Entry entry : - BQTableTypeToCorrectProtoDescriptorTest.entrySet()) { - final TableFieldSchema tableFieldSchema = - TableFieldSchema.newBuilder() - .setType(entry.getKey()) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_field_type") - .build(); - final TableSchema tableSchema = - TableSchema.newBuilder().addFields(0, tableFieldSchema).build(); - final Descriptor descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, entry.getValue()); - } - } - - @Test - public void testStructSimple() throws Exception { - final TableFieldSchema StringType = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRING) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_field_type") - .build(); - final TableFieldSchema tableFieldSchema = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRUCT) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_field_type") - .addFields(0, StringType) - .build(); - final TableSchema tableSchema = TableSchema.newBuilder().addFields(0, tableFieldSchema).build(); - final Descriptor descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, MessageType.getDescriptor()); - } - - @Test - public void testStructComplex() throws Exception { - final TableFieldSchema test_int = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INT64) - 
.setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_int") - .build(); - final TableFieldSchema test_string = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRING) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_string") - .build(); - final TableFieldSchema test_bytes = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.BYTES) - .setMode(TableFieldSchema.Mode.REQUIRED) - .setName("test_bytes") - .build(); - final TableFieldSchema test_bool = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.BOOL) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bool") - .build(); - final TableFieldSchema test_double = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.DOUBLE) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_double") - .build(); - final TableFieldSchema test_date = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.DATE) - .setMode(TableFieldSchema.Mode.REQUIRED) - .setName("test_date") - .build(); - final TableFieldSchema test_datetime = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.DATETIME) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_datetime") - .build(); - final TableFieldSchema test_datetime_str = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.DATETIME) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_datetime_str") - .build(); - final TableFieldSchema ComplexLvl2 = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRUCT) - .setMode(TableFieldSchema.Mode.REQUIRED) - .addFields(0, test_int) - .setName("complex_lvl2") - .build(); - final TableFieldSchema ComplexLvl1 = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRUCT) - .setMode(TableFieldSchema.Mode.REQUIRED) - .addFields(0, test_int) - .addFields(1, ComplexLvl2) - .setName("complex_lvl1") - .build(); - final TableFieldSchema TEST_NUMERIC = - TableFieldSchema.newBuilder() - 
.setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric") - .build(); - final TableFieldSchema TEST_GEO = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.GEOGRAPHY) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_geo") - .build(); - final TableFieldSchema TEST_TIMESTAMP = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.TIMESTAMP) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_timestamp") - .build(); - final TableFieldSchema TEST_TIME = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.TIME) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_time") - .build(); - final TableFieldSchema TEST_TIME_STR = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.TIME) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_time_str") - .build(); - final TableFieldSchema TEST_NUMERIC_REPEATED = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_numeric_repeated") - .build(); - final TableFieldSchema TEST_NUMERIC_STR = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric_str") - .build(); - final TableFieldSchema TEST_NUMERIC_SHORT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric_short") - .build(); - final TableFieldSchema TEST_NUMERIC_INT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric_int") - .build(); - final TableFieldSchema TEST_NUMERIC_LONG = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric_long") - .build(); - final TableFieldSchema TEST_NUMERIC_FLOAT = - 
TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric_float") - .build(); - final TableFieldSchema TEST_NUMERIC_DOUBLE = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric_double") - .build(); - final TableFieldSchema TEST_BIGNUMERIC = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bignumeric") - .build(); - final TableFieldSchema TEST_BIGNUMERIC_STR = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_bignumeric_str") - .build(); - - final TableFieldSchema TEST_BIGNUMERIC_SHORT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bignumeric_short") - .build(); - final TableFieldSchema TEST_BIGNUMERIC_INT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bignumeric_int") - .build(); - final TableFieldSchema TEST_BIGNUMERIC_LONG = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bignumeric_long") - .build(); - final TableFieldSchema TEST_BIGNUMERIC_FLOAT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bignumeric_float") - .build(); - final TableFieldSchema TEST_BIGNUMERIC_DOUBLE = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bignumeric_double") - .build(); - final TableFieldSchema TEST_INTERVAL = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INTERVAL) - 
.setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_interval") - .build(); - final TableFieldSchema TEST_JSON = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.JSON) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_json") - .build(); - final TableSchema tableSchema = - TableSchema.newBuilder() - .addFields(0, test_int) - .addFields(1, test_string) - .addFields(2, test_bytes) - .addFields(3, test_bool) - .addFields(4, test_double) - .addFields(5, test_date) - .addFields(6, test_datetime) - .addFields(7, test_datetime_str) - .addFields(8, ComplexLvl1) - .addFields(9, ComplexLvl2) - .addFields(10, TEST_NUMERIC) - .addFields(11, TEST_GEO) - .addFields(12, TEST_TIMESTAMP) - .addFields(13, TEST_TIME) - .addFields(14, TEST_TIME_STR) - .addFields(15, TEST_NUMERIC_REPEATED) - .addFields(16, TEST_NUMERIC_STR) - .addFields(17, TEST_NUMERIC_SHORT) - .addFields(18, TEST_NUMERIC_INT) - .addFields(19, TEST_NUMERIC_LONG) - .addFields(20, TEST_NUMERIC_FLOAT) - .addFields(21, TEST_NUMERIC_DOUBLE) - .addFields(22, TEST_BIGNUMERIC) - .addFields(23, TEST_BIGNUMERIC_STR) - .addFields(24, TEST_BIGNUMERIC_SHORT) - .addFields(25, TEST_BIGNUMERIC_INT) - .addFields(26, TEST_BIGNUMERIC_FLOAT) - .addFields(27, TEST_BIGNUMERIC_DOUBLE) - .addFields(28, TEST_BIGNUMERIC_LONG) - .addFields(29, TEST_INTERVAL) - .addFields(30, TEST_JSON) - .build(); - final Descriptor descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, ComplexRoot.getDescriptor()); - } - - @Test - public void testCasingComplexStruct() throws Exception { - final TableFieldSchema required = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INT64) - .setMode(TableFieldSchema.Mode.REQUIRED) - .setName("tEsT_ReQuIrEd") - .build(); - final TableFieldSchema repeated = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INT64) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("tESt_repEATed") - 
.build(); - final TableFieldSchema optional = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INT64) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_opTIONal") - .build(); - final TableFieldSchema test_int = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INT64) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("TEST_INT") - .build(); - final TableFieldSchema test_string = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRING) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("TEST_STRING") - .build(); - final TableFieldSchema test_bytes = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.BYTES) - .setMode(TableFieldSchema.Mode.REQUIRED) - .setName("TEST_BYTES") - .build(); - final TableFieldSchema test_bool = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.BOOL) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("TEST_BOOL") - .build(); - final TableFieldSchema test_double = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.DOUBLE) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("TEST_DOUBLE") - .build(); - final TableFieldSchema test_date = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.DATE) - .setMode(TableFieldSchema.Mode.REQUIRED) - .setName("TEST_DATE") - .build(); - final TableFieldSchema option_test = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRUCT) - .setMode(TableFieldSchema.Mode.REQUIRED) - .addFields(0, required) - .addFields(1, repeated) - .addFields(2, optional) - .setName("option_test") - .build(); - final TableSchema tableSchema = - TableSchema.newBuilder() - .addFields(0, test_int) - .addFields(1, test_string) - .addFields(2, test_bytes) - .addFields(3, test_bool) - .addFields(4, test_double) - .addFields(5, test_date) - .addFields(6, option_test) - .build(); - final Descriptor descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - 
isDescriptorEqual(descriptor, CasingComplex.getDescriptor()); - } - - @Test - public void testOptions() throws Exception { - final TableFieldSchema required = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INT64) - .setMode(TableFieldSchema.Mode.REQUIRED) - .setName("test_required") - .build(); - final TableFieldSchema repeated = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INT64) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_repeated") - .build(); - final TableFieldSchema optional = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INT64) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_optional") - .build(); - final TableSchema tableSchema = - TableSchema.newBuilder() - .addFields(0, required) - .addFields(1, repeated) - .addFields(2, optional) - .build(); - final Descriptor descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, OptionTest.getDescriptor()); - } - - @Test - public void testDescriptorReuseDuringCreation() throws Exception { - final TableFieldSchema test_int = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INT64) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_int") - .build(); - final TableFieldSchema reuse_lvl2 = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRUCT) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("reuse_lvl2") - .addFields(0, test_int) - .build(); - final TableFieldSchema reuse_lvl1 = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRUCT) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("reuse_lvl1") - .addFields(0, test_int) - .addFields(0, reuse_lvl2) - .build(); - final TableFieldSchema reuse_lvl1_1 = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRUCT) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("reuse_lvl1_1") - .addFields(0, test_int) - .addFields(0, reuse_lvl2) - 
.build(); - final TableFieldSchema reuse_lvl1_2 = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRUCT) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("reuse_lvl1_2") - .addFields(0, test_int) - .addFields(0, reuse_lvl2) - .build(); - final TableSchema tableSchema = - TableSchema.newBuilder() - .addFields(0, reuse_lvl1) - .addFields(1, reuse_lvl1_1) - .addFields(2, reuse_lvl1_2) - .build(); - final Descriptor descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - HashMap descriptorToCount = new HashMap(); - mapDescriptorToCount(descriptor, descriptorToCount); - assertEquals(descriptorToCount.size(), 2); - assertTrue(descriptorToCount.containsKey("root__reuse_lvl1")); - assertEquals(descriptorToCount.get("root__reuse_lvl1").intValue(), 3); - assertTrue(descriptorToCount.containsKey("root__reuse_lvl1__reuse_lvl2")); - assertEquals(descriptorToCount.get("root__reuse_lvl1__reuse_lvl2").intValue(), 3); - isDescriptorEqual(descriptor, ReuseRoot.getDescriptor()); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java index d3c9d7ac92..abf8927eb3 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoderTest.java index f73a0e1549..5891641986 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoderTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoderTest.java @@ -16,47 +16,40 @@ package com.google.cloud.bigquery.storage.v1beta2; +import static org.junit.jupiter.api.Assertions.assertEquals; + import com.google.protobuf.ByteString; import java.math.BigDecimal; -import org.junit.Assert; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.jupiter.api.Test; -@RunWith(JUnit4.class) -public class BigDecimalByteStringEncoderTest { +class BigDecimalByteStringEncoderTest { @Test - public void testEncodeBigDecimalandEncodeByteString() { + void testEncodeBigDecimalandEncodeByteString() { BigDecimal testBD = new BigDecimal("0"); // expected result bd ByteString testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD); // convert expected to bs BigDecimal resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS); // convert bs to bd - Assert.assertEquals( - 0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd + assertEquals(0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd testBD = new BigDecimal("1.2"); testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD); resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS); - Assert.assertEquals( - 0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd + assertEquals(0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd testBD = new 
BigDecimal("-1.2"); testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD); resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS); - Assert.assertEquals( - 0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd + assertEquals(0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd testBD = new BigDecimal("99999999999999999999999999999.999999999"); testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD); resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS); - Assert.assertEquals( - 0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd + assertEquals(0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd testBD = new BigDecimal("-99999999999999999999999999999.999999999"); testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD); resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS); - Assert.assertEquals( - 0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd + assertEquals(0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClientTest.java index 6485d6ab55..b274569b95 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClientTest.java @@ -15,6 +15,10 @@ */ package com.google.cloud.bigquery.storage.v1beta2; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + import 
com.google.api.gax.core.NoCredentialsProvider; import com.google.api.gax.grpc.GaxGrpcProperties; import com.google.api.gax.grpc.GrpcStatusCode; @@ -42,14 +46,17 @@ import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -public class BigQueryReadClientTest { +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; + +@Execution(ExecutionMode.SAME_THREAD) +class BigQueryReadClientTest { private static MockBigQueryRead mockBigQueryRead; private static MockServiceHelper serviceHelper; private BigQueryReadClient client; @@ -57,8 +64,8 @@ public class BigQueryReadClientTest { private int retryCount; private Code lastRetryStatusCode; - @BeforeClass - public static void startStaticServer() { + @BeforeAll + static void startStaticServer() { mockBigQueryRead = new MockBigQueryRead(); serviceHelper = new MockServiceHelper( @@ -66,13 +73,13 @@ public static void startStaticServer() { serviceHelper.start(); } - @AfterClass - public static void stopServer() { + @AfterAll + static void stopServer() { serviceHelper.stop(); } - @Before - public void setUp() throws IOException { + @BeforeEach + void setUp() throws IOException { serviceHelper.reset(); channelProvider = serviceHelper.createChannelProvider(); retryCount = 0; @@ -95,14 +102,15 @@ public void onRetryAttempt(Status prevStatus, Metadata prevMetadata) { client = BigQueryReadClient.create(settings); } - @After - public void tearDown() throws Exception { + @AfterEach + void tearDown() throws Exception { client.close(); + 
client.awaitTermination(10, TimeUnit.SECONDS); } @Test @SuppressWarnings("all") - public void createReadSessionTest() { + void createReadSessionTest() { String name = "name3373707"; String table = "table110115790"; ReadSession expectedResponse = ReadSession.newBuilder().setName(name).setTable(table).build(); @@ -113,16 +121,16 @@ public void createReadSessionTest() { int maxStreamCount = 940837515; ReadSession actualResponse = client.createReadSession(parent, readSession, maxStreamCount); - Assert.assertEquals(expectedResponse, actualResponse); + assertEquals(expectedResponse, actualResponse); List actualRequests = mockBigQueryRead.getRequests(); - Assert.assertEquals(1, actualRequests.size()); + assertEquals(1, actualRequests.size()); CreateReadSessionRequest actualRequest = (CreateReadSessionRequest) actualRequests.get(0); - Assert.assertEquals(parent, actualRequest.getParent()); - Assert.assertEquals(readSession, actualRequest.getReadSession()); - Assert.assertEquals(maxStreamCount, actualRequest.getMaxStreamCount()); - Assert.assertTrue( + assertEquals(parent, actualRequest.getParent()); + assertEquals(readSession, actualRequest.getReadSession()); + assertEquals(maxStreamCount, actualRequest.getMaxStreamCount()); + assertTrue( channelProvider.isHeaderSent( ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), GaxGrpcProperties.getDefaultApiClientHeaderPattern())); @@ -130,25 +138,22 @@ public void createReadSessionTest() { @Test @SuppressWarnings("all") - public void createReadSessionExceptionTest() throws Exception { + void createReadSessionExceptionTest() throws Exception { StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); mockBigQueryRead.addException(exception); - try { - String parent = "parent-995424086"; - ReadSession readSession = ReadSession.newBuilder().build(); - int maxStreamCount = 940837515; + String parent = "parent-995424086"; + ReadSession readSession = ReadSession.newBuilder().build(); + int 
maxStreamCount = 940837515; - client.createReadSession(parent, readSession, maxStreamCount); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception - } + assertThrows( + InvalidArgumentException.class, + () -> client.createReadSession(parent, readSession, maxStreamCount)); } @Test @SuppressWarnings("all") - public void readRowsTest() throws Exception { + void readRowsTest() throws Exception { long rowCount = 1340416618L; ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); mockBigQueryRead.addResponse(expectedResponse); @@ -160,16 +165,16 @@ public void readRowsTest() throws Exception { callable.serverStreamingCall(request, responseObserver); List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); - Assert.assertEquals(expectedResponse, actualResponses.get(0)); + assertEquals(1, actualResponses.size()); + assertEquals(expectedResponse, actualResponses.get(0)); - Assert.assertEquals(retryCount, 0); - Assert.assertEquals(lastRetryStatusCode, Code.OK); + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); } @Test @SuppressWarnings("all") - public void readRowsExceptionTest() throws Exception { + void readRowsExceptionTest() throws Exception { StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); mockBigQueryRead.addException(exception); ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); @@ -179,22 +184,19 @@ public void readRowsExceptionTest() throws Exception { ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); - try { - List actualResponses = responseObserver.future().get(); - Assert.fail("No exception thrown"); - } catch (ExecutionException e) { - Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); - InvalidArgumentException apiException = (InvalidArgumentException) 
e.getCause(); - Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); - } - - Assert.assertEquals(retryCount, 0); - Assert.assertEquals(lastRetryStatusCode, Code.OK); + ExecutionException e = + assertThrows(ExecutionException.class, () -> responseObserver.future().get()); + assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); + assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); } @Test @SuppressWarnings("all") - public void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException { + void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException { ApiException exception = new InternalException( new StatusRuntimeException( @@ -213,15 +215,15 @@ public void readRowsRetryingEOSExceptionTest() throws ExecutionException, Interr ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); + assertEquals(1, actualResponses.size()); - Assert.assertEquals(retryCount, 1); - Assert.assertEquals(lastRetryStatusCode, Code.INTERNAL); + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.INTERNAL); } @Test @SuppressWarnings("all") - public void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException { + void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException { ApiException exception = new InternalException( new StatusRuntimeException( @@ -240,15 +242,15 @@ public void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, Inte ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, 
responseObserver); List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); + assertEquals(1, actualResponses.size()); - Assert.assertEquals(retryCount, 1); - Assert.assertEquals(lastRetryStatusCode, Code.INTERNAL); + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.INTERNAL); } @Test @SuppressWarnings("all") - public void readRowsNoRetryForResourceExhaustedWithoutRetryInfo() + void readRowsNoRetryForResourceExhaustedWithoutRetryInfo() throws ExecutionException, InterruptedException { ApiException exception = new ResourceExhaustedException( @@ -267,23 +269,19 @@ public void readRowsNoRetryForResourceExhaustedWithoutRetryInfo() ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); - try { - List actualResponses = responseObserver.future().get(); - Assert.fail("No exception thrown"); - } catch (ExecutionException e) { - Assert.assertTrue(e.getCause() instanceof ResourceExhaustedException); - ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause(); - Assert.assertEquals( - StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode()); - } - - Assert.assertEquals(retryCount, 0); - Assert.assertEquals(lastRetryStatusCode, Code.OK); + ExecutionException e = + assertThrows(ExecutionException.class, () -> responseObserver.future().get()); + assertTrue(e.getCause() instanceof ResourceExhaustedException); + ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause(); + assertEquals(StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode()); + + assertEquals(retryCount, 0); + assertEquals(lastRetryStatusCode, Code.OK); } @Test @SuppressWarnings("all") - public void readRowsNoRetryForResourceExhaustedWithRetryInfo() + void readRowsNoRetryForResourceExhaustedWithRetryInfo() throws ExecutionException, InterruptedException { RetryInfo retryInfo = 
RetryInfo.newBuilder() @@ -329,9 +327,9 @@ public RetryInfo parseBytes(byte[] serialized) { ServerStreamingCallable callable = client.readRowsCallable(); callable.serverStreamingCall(request, responseObserver); List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); + assertEquals(1, actualResponses.size()); - Assert.assertEquals(retryCount, 1); - Assert.assertEquals(lastRetryStatusCode, Code.RESOURCE_EXHAUSTED); + assertEquals(retryCount, 1); + assertEquals(lastRetryStatusCode, Code.RESOURCE_EXHAUSTED); } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClientTest.java index ad261c2175..eccb9ed658 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteClientTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoderTest.java deleted file mode 100644 index 393babd189..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoderTest.java +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1beta2; - -import static org.junit.Assert.assertEquals; - -import java.time.LocalDateTime; -import java.time.LocalTime; -import java.util.logging.Logger; -import org.junit.Assert; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -public class CivilTimeEncoderTest { - private static final Logger LOG = Logger.getLogger(CivilTimeEncoderTest.class.getName()); - - // Time - @Test - public void encodeAndDecodePacked64TimeMicros_validTime() { - // 00:00:00.000000 - // 0b000000000000000000000000000|00000|000000|000000|00000000000000000000 - // 0x0 - assertEquals( - 0x0L, CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(0, 0, 0, 0))); - assertEquals( - LocalTime.of(0, 0, 0, 0), CivilTimeEncoder.decodePacked64TimeMicrosLocalTime(0x0L)); - - // 00:01:02.003000 - // 0b000000000000000000000000000|00000|000001|000010|00000000101110111000 - // 0x4200BB8 - assertEquals( - 0x4200BB8L, - CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(0, 1, 2, 3_000_000))); - assertEquals( - LocalTime.of(0, 1, 2, 3_000_000), - CivilTimeEncoder.decodePacked64TimeMicrosLocalTime(0x4200BB8L)); - - // 12:00:00.000000 - // 0b000000000000000000000000000|01100|000000|000000|00000000000000000000 - // 0xC00000000 - assertEquals( - 0xC00000000L, - CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(12, 0, 0, 0))); - assertEquals( - LocalTime.of(12, 0, 0, 0), - CivilTimeEncoder.decodePacked64TimeMicrosLocalTime(0xC00000000L)); - - // 
13:14:15.016000 - // 0b000000000000000000000000000|01101|001110|001111|00000011111010000000 - // 0xD38F03E80 - assertEquals( - 0xD38F03E80L, - CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(13, 14, 15, 16_000_000))); - assertEquals( - LocalTime.of(13, 14, 15, 16_000_000), - CivilTimeEncoder.decodePacked64TimeMicrosLocalTime(0xD38F03E80L)); - - // 23:59:59.999000 - // 0b000000000000000000000000000|10111|111011|111011|11110011111001011000 - // 0x17EFBF3E58 - assertEquals( - 0x17EFBF3E58L, - CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(23, 59, 59, 999_000_000))); - assertEquals( - LocalTime.of(23, 59, 59, 999_000_000), - CivilTimeEncoder.decodePacked64TimeMicrosLocalTime(0x17EFBF3E58L)); - } - - @Test - public void encodePacked64TimeMicros_giveErrorWhenPrecisionIsLost() { - try { // 00:00:00.000000999 - // 0b000000000000000000000000000|00000|000000|000000|00000000000000000000 - // 0x0 - assertEquals( - 0x0L, CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(0, 0, 0, 999))); - Assert.fail(); - } catch (IllegalArgumentException e) { - } - } - - @Test - public void decodePacked64TimeMicros_invalidBitField_throwsIllegalArgumentException() { - try { - // 00:00:00.000000 - // 0b000000000000000000000000001|00000|000000|000000|00000000000000000000 - // 0x2000000000 - CivilTimeEncoder.decodePacked64TimeMicros(0x2000000000L); - Assert.fail(); - } catch (IllegalArgumentException e) { - assertEquals(null, e.getMessage()); - } - } - - @Test - public void decodePacked64TimeMicros_invalidMicroOfSecond_throwsIllegalArgumentException() { - try { - // 00:00:00.1000000 - // 0b000000000000000000000000000|00000|000000|000000|11110100001001000000 - // 0xF4240 - CivilTimeEncoder.decodePacked64TimeMicros(0xF4240L); - Assert.fail(); - } catch (IllegalArgumentException e) { - assertEquals(null, e.getMessage()); - } - } - - @Test - public void decodePacked64TimeMicros_invalidSecondOfMinute_throwsIllegalArgumentException() { - try { - // 
00:00:60.000000 - // 0b000000000000000000000000000|00000|000000|111100|00000000000000000000 - // 0x3C00000 - CivilTimeEncoder.decodePacked64TimeMicros(0x3C00000L); - Assert.fail(); - } catch (IllegalArgumentException e) { - assertEquals("Invalid value for SecondOfMinute (valid values 0 - 59): 60", e.getMessage()); - } - } - - @Test - public void decodePacked64TimeMicros_invalidMinuteOfHour_throwsIllegalArgumentException() { - try { - // 00:60:00.000000 - // 0b000000000000000000000000000|00000|111100|000000|00000000000000000000 - // 0xF0000000 - CivilTimeEncoder.decodePacked64TimeMicros(0xF0000000L); - Assert.fail(); - } catch (IllegalArgumentException expected) { - } - } - - @Test - public void decodePacked64TimeMicros_invalidHourOfDay_throwsIllegalArgumentException() { - try { - // 24:00:00.000000 - // 0b000000000000000000000000000|11000|000000|000000|00000000000000000000 - // 0x1800000000 - CivilTimeEncoder.decodePacked64TimeMicros(0x1800000000L); - Assert.fail(); - } catch (IllegalArgumentException e) { - assertEquals("Invalid value for HourOfDay (valid values 0 - 23): 24", e.getMessage()); - } - } - - // Date Time Tests - @Test - public void encodeAndDecodePacked64DatetimeMicros_validDateTime() { - // 0001/01/01 00:00:00 - // 0b0000000000000000000000|00000000000001|0001|00001|00000|000000|000000 - // 0x4420000 - assertEquals( - 0x442000000000L, - CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( - LocalDateTime.of(1, 1, 1, 0, 0, 0, 0))); - assertEquals( - LocalDateTime.of(1, 1, 1, 0, 0, 0, 0), - CivilTimeEncoder.decodePacked64DatetimeMicrosLocalDateTime(0x442000000000L)); - - // 0001/02/03 00:01:02 - // 0b0000000000000000000000|00000000000001|0010|00011|00000|000001|000010 - // 0x4860042 - assertEquals( - 0x486004200BB8L, - CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( - LocalDateTime.of(1, 2, 3, 0, 1, 2, 3_000_000))); - assertEquals( - LocalDateTime.of(1, 2, 3, 0, 1, 2, 3_000_000), - 
CivilTimeEncoder.decodePacked64DatetimeMicrosLocalDateTime(0x486004200BB8L)); - - // 0001/01/01 12:00:00 - // 0b0000000000000000000000|00000000000001|0001|00001|01100|000000|000000 - // 0x442C000 - assertEquals( - 0x442C00000000L, - CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( - LocalDateTime.of(1, 1, 1, 12, 0, 0, 0))); - assertEquals( - LocalDateTime.of(1, 1, 1, 12, 0, 0, 0), - CivilTimeEncoder.decodePacked64DatetimeMicrosLocalDateTime(0x442C00000000L)); - - // 0001/01/01 13:14:15 - // 0b0000000000000000000000|00000000000001|0001|00001|01101|001110|001111 - // 0x442D38F - assertEquals( - 0x442D38F03E80L, - CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( - LocalDateTime.of(1, 1, 1, 13, 14, 15, 16_000_000))); - assertEquals( - LocalDateTime.of(1, 1, 1, 13, 14, 15, 16_000_000), - CivilTimeEncoder.decodePacked64DatetimeMicrosLocalDateTime(0x442D38F03E80L)); - - // 9999/12/31 23:59:59 - // 0b0000000000000000000000|10011100001111|1100|11111|10111|111011|111011 - // 0x9C3F3F7EFB - assertEquals( - 0x9C3F3F7EFBF3E58L, - CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( - LocalDateTime.of(9999, 12, 31, 23, 59, 59, 999_000_000))); - assertEquals( - LocalDateTime.of(9999, 12, 31, 23, 59, 59, 999_000_000), - CivilTimeEncoder.decodePacked64DatetimeMicrosLocalDateTime(0x9C3F3F7EFBF3E58L)); - } - - @Test - public void encodePacked64DateTimeMicros_giveErrorWhenPrecisionIsLost() { - // 0001/01/01 00:00:00.000000999 - // 0b0000000000000000000000|00000000000001|0001|00001|00000|000000|000000 - // 0x4420000 - try { - CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime( - LocalDateTime.of(1, 1, 1, 0, 0, 0, 999)); - Assert.fail(); - } catch (IllegalArgumentException e) { - } - } - - @Test - public void encodePacked64DatetimeMicros_invalidYear_throwsIllegalArgumentException() { - // 10000/01/01 00:00:00.000000 - // 0b00|10011100010000|0001|00001|00000|000000|000000|00000000000000000000 - // 0x9C4042000000000 - LocalDateTime dateTime = 
LocalDateTime.of(10000, 1, 1, 0, 0, 0, 0); - try { - CivilTimeEncoder.encodePacked64DatetimeMicrosLocalDateTime(dateTime); - Assert.fail(); - } catch (IllegalArgumentException expected) { - } - } - - @Test - public void decodePacked64DatetimeMicros_validBitFieldDatetimeMicros() {} - - @Test - public void decodePacked64DatetimeMicros_invalidBitField() { - try { - // 0001/01/01 00:00:00 - // 0b0000000000000000000001|00000000000001|0001|00001|00000|000000|000000 - // 0x10004420000 - CivilTimeEncoder.decodePacked64DatetimeMicros(0x10004420000L); - Assert.fail(); - } catch (IllegalArgumentException e) { - } - } - - @Test - public void decodePacked64DatetimeMicros_invalidMicroOfSecond_throwsIllegalArgumentException() { - try { - // 0001/01/01 00:00:00.1000000 - // 0b00|00000000000001|0001|00001|00000|000000|000000|11110100001001000000 - // 0x4420000F4240 - CivilTimeEncoder.decodePacked64DatetimeMicros(0x4420000F4240L); - Assert.fail(); - } catch (IllegalArgumentException expected) { - } - } - - @Test - public void decodePacked64DatetimeMicros_invalidSecondOfMinute_throwsIllegalArgumentException() { - try { - // 0001/01/01 00:00:60.000000 - // 0b00|00000000000001|0001|00001|00000|000000|111100|00000000000000000000 - // 0x442003C00000 - CivilTimeEncoder.decodePacked64DatetimeMicros(0x442003C00000L); - Assert.fail(); - } catch (IllegalArgumentException expected) { - } - } - - @Test - public void decodePacked64DatetimeMicros_invalidMinuteOfHour_throwsIllegalArgumentException() { - try { - // 0001/01/01 00:60:00.000000 - // 0b00|00000000000001|0001|00001|00000|111100|000000|00000000000000000000 - // 0x4420F0000000 - CivilTimeEncoder.decodePacked64DatetimeMicros(0x4420F0000000L); - Assert.fail(); - } catch (IllegalArgumentException expected) { - } - } - - @Test - public void decodePacked64DatetimeMicros_invalidHourOfDay_throwsIllegalArgumentException() { - try { - // 0001/01/01 24:00:00.000000 - // 0b00|00000000000001|0001|00001|11000|000000|000000|00000000000000000000 - // 
0x443800000000 - CivilTimeEncoder.decodePacked64DatetimeMicros(0x443800000000L); - Assert.fail(); - } catch (IllegalArgumentException expected) { - } - } - - @Test - public void decodePacked64DatetimeMicros_invalidDayOfMonth_throwsIllegalArgumentException() { - try { - // 0001/01/00 00:00:00.000000 - // 0b00|00000000000001|0001|00000|00000|000000|000000|00000000000000000000 - // 0x440000000000 - CivilTimeEncoder.decodePacked64DatetimeMicros(0x440000000000L); - Assert.fail(); - } catch (IllegalArgumentException expected) { - } - } - - @Test - public void decodePacked64DatetimeMicros_invalidMonthOfYear_throwsIllegalArgumentException() { - try { - // 0001/13/01 00:00:00.000000 - // 0b00|00000000000001|1101|00001|00000|000000|000000|00000000000000000000 - // 0x742000000000 - CivilTimeEncoder.decodePacked64DatetimeMicros(0x742000000000L); - Assert.fail(); - } catch (IllegalArgumentException expected) { - } - } - - @Test - public void decodePacked64DatetimeMicros_invalidYear_throwsIllegalArgumentException() { - try { - // 10000/01/01 00:00:00.000000 - // 0b00|10011100010000|0001|00001|00000|000000|000000|00000000000000000000 - // 0x9C4042000000000 - CivilTimeEncoder.decodePacked64DatetimeMicros(0x9C4042000000000L); - Assert.fail(); - } catch (IllegalArgumentException expected) { - } - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/FakeBigQueryWrite.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/FakeBigQueryWrite.java deleted file mode 100644 index 3b8b936e53..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/FakeBigQueryWrite.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1beta2; - -import com.google.api.gax.grpc.testing.MockGrpcService; -import com.google.protobuf.AbstractMessage; -import io.grpc.ServerServiceDefinition; -import java.time.Duration; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.ScheduledExecutorService; - -/** - * A fake implementation of {@link MockGrpcService}, that can be used to test clients of a - * StreamWriter. It forwards calls to the real implementation (@link FakeBigQueryWriteImpl}. - */ -public class FakeBigQueryWrite implements MockGrpcService { - private final FakeBigQueryWriteImpl serviceImpl; - - public FakeBigQueryWrite() { - serviceImpl = new FakeBigQueryWriteImpl(); - } - - @Override - public List getRequests() { - return new LinkedList(serviceImpl.getCapturedRequests()); - } - - public void waitForResponseScheduled() throws InterruptedException { - serviceImpl.waitForResponseScheduled(); - } - - public List getAppendRequests() { - return serviceImpl.getCapturedRequests(); - } - - public List getWriteStreamRequests() { - return serviceImpl.getCapturedWriteRequests(); - } - - @Override - public void addResponse(AbstractMessage response) { - if (response instanceof AppendRowsResponse) { - serviceImpl.addResponse((AppendRowsResponse) response); - } else if (response instanceof WriteStream) { - serviceImpl.addWriteStreamResponse((WriteStream) response); - } else if (response instanceof FlushRowsResponse) { - serviceImpl.addFlushRowsResponse((FlushRowsResponse) response); - } else { - throw new 
IllegalStateException("Unsupported service"); - } - } - - @Override - public void addException(Exception exception) { - serviceImpl.addConnectionError(exception); - } - - @Override - public ServerServiceDefinition getServiceDefinition() { - return serviceImpl.bindService(); - } - - @Override - public void reset() { - serviceImpl.reset(); - } - - public void setResponseDelay(Duration delay) { - serviceImpl.setResponseDelay(delay); - } - - public void setResponseSleep(Duration sleep) { - serviceImpl.setResponseSleep(sleep); - } - - public void setExecutor(ScheduledExecutorService executor) { - serviceImpl.setExecutor(executor); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/FakeBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/FakeBigQueryWriteImpl.java deleted file mode 100644 index 595ea4bc63..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/FakeBigQueryWriteImpl.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigquery.storage.v1beta2; - -import com.google.common.base.Optional; -import com.google.common.util.concurrent.Uninterruptibles; -import io.grpc.stub.StreamObserver; -import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.logging.Logger; - -/** - * A fake implementation of {@link BigQueryWriteImplBase} that can acts like server in StreamWriter - * unit testing. - */ -class FakeBigQueryWriteImpl extends BigQueryWriteGrpc.BigQueryWriteImplBase { - private static final Logger LOG = Logger.getLogger(FakeBigQueryWriteImpl.class.getName()); - - private final LinkedBlockingQueue requests = new LinkedBlockingQueue<>(); - private final LinkedBlockingQueue writeRequests = - new LinkedBlockingQueue<>(); - private final LinkedBlockingQueue flushRequests = new LinkedBlockingQueue<>(); - private final LinkedBlockingQueue responses = new LinkedBlockingQueue<>(); - private final LinkedBlockingQueue writeResponses = new LinkedBlockingQueue<>(); - private final LinkedBlockingQueue flushResponses = new LinkedBlockingQueue<>(); - private final AtomicInteger nextMessageId = new AtomicInteger(1); - private boolean autoPublishResponse; - private ScheduledExecutorService executor = null; - private Duration responseDelay = Duration.ZERO; - - private Duration responseSleep = Duration.ZERO; - private Semaphore responseSemaphore = new Semaphore(0, true); - - /** Class used to save the state of a possible response. 
*/ - private static class Response { - Optional appendResponse; - Optional error; - - public Response(AppendRowsResponse appendResponse) { - this.appendResponse = Optional.of(appendResponse); - this.error = Optional.absent(); - } - - public Response(Throwable exception) { - this.appendResponse = Optional.absent(); - this.error = Optional.of(exception); - } - - public AppendRowsResponse getResponse() { - return appendResponse.get(); - } - - public Throwable getError() { - return error.get(); - } - - boolean isError() { - return error.isPresent(); - } - - @Override - public String toString() { - if (isError()) { - return error.get().toString(); - } - return appendResponse.get().toString(); - } - } - - @Override - public void getWriteStream( - GetWriteStreamRequest request, StreamObserver responseObserver) { - Object response = writeResponses.remove(); - if (response instanceof WriteStream) { - writeRequests.add(request); - responseObserver.onNext((WriteStream) response); - responseObserver.onCompleted(); - } else if (response instanceof Exception) { - responseObserver.onError((Exception) response); - } else { - responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); - } - } - - @Override - public void flushRows( - FlushRowsRequest request, StreamObserver responseObserver) { - Object response = writeResponses.remove(); - if (response instanceof FlushRowsResponse) { - flushRequests.add(request); - responseObserver.onNext((FlushRowsResponse) response); - responseObserver.onCompleted(); - } else if (response instanceof Exception) { - responseObserver.onError((Exception) response); - } else { - responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); - } - } - - public void waitForResponseScheduled() throws InterruptedException { - responseSemaphore.acquire(); - } - - @Override - public StreamObserver appendRows( - final StreamObserver responseObserver) { - StreamObserver requestObserver = - new StreamObserver() { 
- @Override - public void onNext(AppendRowsRequest value) { - LOG.fine("Get request:" + value.toString()); - final Response response = responses.remove(); - requests.add(value); - if (responseSleep.compareTo(Duration.ZERO) > 0) { - LOG.info("Sleeping before response for " + responseSleep.toString()); - Uninterruptibles.sleepUninterruptibly( - responseSleep.toMillis(), TimeUnit.MILLISECONDS); - } - if (responseDelay == Duration.ZERO) { - sendResponse(response, responseObserver); - } else { - final Response responseToSend = response; - // TODO(yirutang): This is very wrong because it messes up response/complete ordering. - LOG.fine("Schedule a response to be sent at delay"); - executor.schedule( - new Runnable() { - @Override - public void run() { - sendResponse(responseToSend, responseObserver); - } - }, - responseDelay.toMillis(), - TimeUnit.MILLISECONDS); - } - responseSemaphore.release(); - } - - @Override - public void onError(Throwable t) { - responseObserver.onError(t); - } - - @Override - public void onCompleted() { - responseObserver.onCompleted(); - } - }; - return requestObserver; - } - - private void sendResponse( - Response response, StreamObserver responseObserver) { - LOG.fine("Sending response: " + response.toString()); - if (response.isError()) { - responseObserver.onError(response.getError()); - } else { - responseObserver.onNext(response.getResponse()); - } - } - - /** Set an executor to use to delay publish responses. */ - public FakeBigQueryWriteImpl setExecutor(ScheduledExecutorService executor) { - this.executor = executor; - return this; - } - - /** Set an amount of time by which to delay publish responses. */ - public FakeBigQueryWriteImpl setResponseDelay(Duration responseDelay) { - this.responseDelay = responseDelay; - return this; - } - - /** Set an amount of time by which to sleep before publishing responses. 
*/ - public FakeBigQueryWriteImpl setResponseSleep(Duration responseSleep) { - this.responseSleep = responseSleep; - return this; - } - - public FakeBigQueryWriteImpl addResponse(AppendRowsResponse appendRowsResponse) { - responses.add(new Response(appendRowsResponse)); - return this; - } - - public FakeBigQueryWriteImpl addResponse(AppendRowsResponse.Builder appendResponseBuilder) { - return addResponse(appendResponseBuilder.build()); - } - - public FakeBigQueryWriteImpl addWriteStreamResponse(WriteStream response) { - writeResponses.add(response); - return this; - } - - public FakeBigQueryWriteImpl addFlushRowsResponse(FlushRowsResponse response) { - flushResponses.add(response); - return this; - } - - public FakeBigQueryWriteImpl addConnectionError(Throwable error) { - responses.add(new Response(error)); - return this; - } - - public List getCapturedRequests() { - return new ArrayList(requests); - } - - public List getCapturedWriteRequests() { - return new ArrayList(writeRequests); - } - - public void reset() { - requests.clear(); - responses.clear(); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/FakeClock.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/FakeClock.java deleted file mode 100644 index c5b8610d6e..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/FakeClock.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1beta2; - -import com.google.api.core.ApiClock; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -/** A Clock to help with testing time-based logic. */ -public class FakeClock implements ApiClock { - - private final AtomicLong millis = new AtomicLong(); - - // Advances the clock value by {@code time} in {@code timeUnit}. - public void advance(long time, TimeUnit timeUnit) { - millis.addAndGet(timeUnit.toMillis(time)); - } - - @Override - public long nanoTime() { - return millisTime() * 1000_000L; - } - - @Override - public long millisTime() { - return millis.get(); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/FakeScheduledExecutorService.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/FakeScheduledExecutorService.java deleted file mode 100644 index bc4ec137dd..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/FakeScheduledExecutorService.java +++ /dev/null @@ -1,346 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigquery.storage.v1beta2; - -import com.google.api.core.ApiClock; -import com.google.common.primitives.Ints; -import com.google.common.util.concurrent.SettableFuture; -import java.time.Duration; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Deque; -import java.util.LinkedList; -import java.util.List; -import java.util.PriorityQueue; -import java.util.concurrent.AbstractExecutorService; -import java.util.concurrent.Callable; -import java.util.concurrent.Delayed; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.logging.Logger; - -/** - * Fake implementation of {@link ScheduledExecutorService} that allows tests control the reference - * time of the executor and decide when to execute any outstanding task. 
- */ -public class FakeScheduledExecutorService extends AbstractExecutorService - implements ScheduledExecutorService { - private static final Logger LOG = Logger.getLogger(FakeScheduledExecutorService.class.getName()); - - private final AtomicBoolean shutdown = new AtomicBoolean(false); - private final PriorityQueue> pendingCallables = new PriorityQueue<>(); - private final FakeClock clock = new FakeClock(); - private final Deque expectedWorkQueue = new LinkedList<>(); - - public ApiClock getClock() { - return clock; - } - - @Override - public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) { - return schedulePendingCallable( - new PendingCallable<>( - Duration.ofMillis(unit.toMillis(delay)), command, PendingCallableType.NORMAL)); - } - - @Override - public ScheduledFuture schedule(Callable callable, long delay, TimeUnit unit) { - return schedulePendingCallable( - new PendingCallable<>( - Duration.ofMillis(unit.toMillis(delay)), callable, PendingCallableType.NORMAL)); - } - - @Override - public ScheduledFuture scheduleAtFixedRate( - Runnable command, long initialDelay, long period, TimeUnit unit) { - return schedulePendingCallable( - new PendingCallable<>( - Duration.ofMillis(unit.toMillis(initialDelay)), - command, - PendingCallableType.FIXED_RATE)); - } - - @Override - public ScheduledFuture scheduleWithFixedDelay( - Runnable command, long initialDelay, long delay, TimeUnit unit) { - return schedulePendingCallable( - new PendingCallable<>( - Duration.ofMillis(unit.toMillis(initialDelay)), - command, - PendingCallableType.FIXED_DELAY)); - } - - /** - * This will advance the reference time of the executor and execute (in the same thread) any - * outstanding callable which execution time has passed. 
- */ - public void advanceTime(Duration toAdvance) { - LOG.info( - "Advance to time to:" - + Instant.ofEpochMilli(clock.millisTime() + toAdvance.toMillis()).toString()); - clock.advance(toAdvance.toMillis(), TimeUnit.MILLISECONDS); - work(); - } - - private void work() { - for (; ; ) { - PendingCallable callable = null; - Instant cmpTime = Instant.ofEpochMilli(clock.millisTime()); - if (!pendingCallables.isEmpty()) { - LOG.info( - "Going to call: Current time: " - + cmpTime.toString() - + " Scheduled time: " - + pendingCallables.peek().getScheduledTime().toString() - + " Creation time:" - + pendingCallables.peek().getCreationTime().toString()); - } - synchronized (pendingCallables) { - if (pendingCallables.isEmpty() - || pendingCallables.peek().getScheduledTime().isAfter(cmpTime)) { - break; - } - callable = pendingCallables.poll(); - } - if (callable != null) { - try { - callable.call(); - } catch (Exception e) { - // We ignore any callable exception, which should be set to the future but not relevant to - // advanceTime. 
- } - } - } - - synchronized (pendingCallables) { - if (shutdown.get() && pendingCallables.isEmpty()) { - pendingCallables.notifyAll(); - } - } - } - - @Override - public void shutdown() { - if (shutdown.getAndSet(true)) { - throw new IllegalStateException("This executor has been shutdown already"); - } - } - - @Override - public List shutdownNow() { - if (shutdown.getAndSet(true)) { - throw new IllegalStateException("This executor has been shutdown already"); - } - List pending = new ArrayList<>(); - for (final PendingCallable pendingCallable : pendingCallables) { - pending.add( - new Runnable() { - @Override - public void run() { - pendingCallable.call(); - } - }); - } - synchronized (pendingCallables) { - pendingCallables.notifyAll(); - pendingCallables.clear(); - } - return pending; - } - - @Override - public boolean isShutdown() { - return shutdown.get(); - } - - @Override - public boolean isTerminated() { - return pendingCallables.isEmpty(); - } - - @Override - public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { - synchronized (pendingCallables) { - if (pendingCallables.isEmpty()) { - return true; - } - LOG.info("Wating on pending callables" + pendingCallables.size()); - pendingCallables.wait(unit.toMillis(timeout)); - return pendingCallables.isEmpty(); - } - } - - @Override - public void execute(Runnable command) { - if (shutdown.get()) { - throw new IllegalStateException("This executor has been shutdown"); - } - command.run(); - } - - ScheduledFuture schedulePendingCallable(PendingCallable callable) { - LOG.info( - "Schedule pending callable called " + callable.delay + " " + callable.getScheduledTime()); - if (shutdown.get()) { - throw new IllegalStateException("This executor has been shutdown"); - } - synchronized (pendingCallables) { - pendingCallables.add(callable); - } - work(); - synchronized (expectedWorkQueue) { - // We compare by the callable delay in order decide when to remove expectations from the - // 
expected work queue, i.e. only the expected work that matches the delay of the scheduled - // callable is removed from the queue. - if (!expectedWorkQueue.isEmpty() && expectedWorkQueue.peek().equals(callable.delay)) { - expectedWorkQueue.poll(); - } - expectedWorkQueue.notifyAll(); - } - - return callable.getScheduledFuture(); - } - - enum PendingCallableType { - NORMAL, - FIXED_RATE, - FIXED_DELAY - } - - /** Class that saves the state of an scheduled pending callable. */ - class PendingCallable implements Comparable> { - Instant creationTime = Instant.ofEpochMilli(clock.millisTime()); - Duration delay; - Callable pendingCallable; - SettableFuture future = SettableFuture.create(); - AtomicBoolean cancelled = new AtomicBoolean(false); - AtomicBoolean done = new AtomicBoolean(false); - PendingCallableType type; - - PendingCallable(Duration delay, final Runnable runnable, PendingCallableType type) { - pendingCallable = - new Callable() { - @Override - public T call() { - runnable.run(); - return null; - } - }; - this.type = type; - this.delay = delay; - } - - PendingCallable(Duration delay, Callable callable, PendingCallableType type) { - pendingCallable = callable; - this.type = type; - this.delay = delay; - } - - private Instant getScheduledTime() { - return creationTime.plus(delay); - } - - private Instant getCreationTime() { - return creationTime; - } - - ScheduledFuture getScheduledFuture() { - return new ScheduledFuture() { - @Override - public long getDelay(TimeUnit unit) { - return unit.convert( - getScheduledTime().toEpochMilli() - clock.millisTime(), TimeUnit.MILLISECONDS); - } - - @Override - public int compareTo(Delayed o) { - return Ints.saturatedCast( - getDelay(TimeUnit.MILLISECONDS) - o.getDelay(TimeUnit.MILLISECONDS)); - } - - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - synchronized (this) { - cancelled.set(true); - return !done.get(); - } - } - - @Override - public boolean isCancelled() { - return cancelled.get(); - } - - 
@Override - public boolean isDone() { - return done.get(); - } - - @Override - public T get() throws InterruptedException, ExecutionException { - return future.get(); - } - - @Override - public T get(long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - return future.get(timeout, unit); - } - }; - } - - T call() { - T result = null; - synchronized (this) { - if (cancelled.get()) { - return null; - } - try { - result = pendingCallable.call(); - future.set(result); - } catch (Exception e) { - future.setException(e); - } finally { - switch (type) { - case NORMAL: - done.set(true); - break; - case FIXED_DELAY: - this.creationTime = Instant.ofEpochMilli(clock.millisTime()); - schedulePendingCallable(this); - break; - case FIXED_RATE: - this.creationTime = this.creationTime.plus(delay); - schedulePendingCallable(this); - break; - default: - // Nothing to do - } - } - } - return result; - } - - @Override - public int compareTo(PendingCallable other) { - return getScheduledTime().compareTo(other.getScheduledTime()); - } - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriterTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriterTest.java deleted file mode 100644 index 37f82d6698..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriterTest.java +++ /dev/null @@ -1,391 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1beta2; - -import static org.junit.Assert.assertEquals; - -import com.google.api.core.ApiFuture; -import com.google.api.gax.core.ExecutorProvider; -import com.google.api.gax.core.InstantiatingExecutorProvider; -import com.google.api.gax.core.NoCredentialsProvider; -import com.google.api.gax.grpc.testing.LocalChannelProvider; -import com.google.api.gax.grpc.testing.MockGrpcService; -import com.google.api.gax.grpc.testing.MockServiceHelper; -import com.google.cloud.bigquery.storage.test.JsonTest; -import com.google.cloud.bigquery.storage.test.Test.FooType; -import com.google.protobuf.Descriptors.DescriptorValidationException; -import com.google.protobuf.Int64Value; -import com.google.protobuf.Timestamp; -import java.io.IOException; -import java.time.Instant; -import java.time.LocalTime; -import java.util.Arrays; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.logging.Logger; -import org.json.JSONArray; -import org.json.JSONObject; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -public class JsonStreamWriterTest { - private static final Logger LOG = Logger.getLogger(JsonStreamWriterTest.class.getName()); - private static final String TEST_STREAM = "projects/p/datasets/d/tables/t/streams/s"; - private static final String TEST_TABLE = "projects/p/datasets/d/tables/t"; - private static final ExecutorProvider 
SINGLE_THREAD_EXECUTOR = - InstantiatingExecutorProvider.newBuilder().setExecutorThreadCount(1).build(); - private static LocalChannelProvider channelProvider; - private FakeScheduledExecutorService fakeExecutor; - private FakeBigQueryWrite testBigQueryWrite; - private static MockServiceHelper serviceHelper; - - private final TableFieldSchema FOO = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRING) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("foo") - .build(); - private final TableSchema TABLE_SCHEMA = TableSchema.newBuilder().addFields(0, FOO).build(); - - private final TableFieldSchema BAR = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRING) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("bar") - .build(); - private final TableFieldSchema BAZ = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRING) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("baz") - .build(); - private final TableSchema UPDATED_TABLE_SCHEMA = - TableSchema.newBuilder().addFields(0, FOO).addFields(1, BAR).build(); - private final TableSchema UPDATED_TABLE_SCHEMA_2 = - TableSchema.newBuilder().addFields(0, FOO).addFields(1, BAR).addFields(2, BAZ).build(); - - private final TableFieldSchema TEST_INT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INT64) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_int") - .build(); - private final TableFieldSchema TEST_STRING = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRING) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_string") - .build(); - - @Before - public void setUp() throws Exception { - testBigQueryWrite = new FakeBigQueryWrite(); - serviceHelper = - new MockServiceHelper( - UUID.randomUUID().toString(), Arrays.asList(testBigQueryWrite)); - serviceHelper.start(); - channelProvider = serviceHelper.createChannelProvider(); - fakeExecutor = new FakeScheduledExecutorService(); - 
testBigQueryWrite.setExecutor(fakeExecutor); - Instant time = Instant.now(); - Timestamp timestamp = - Timestamp.newBuilder().setSeconds(time.getEpochSecond()).setNanos(time.getNano()).build(); - // Add enough GetWriteStream response. - for (int i = 0; i < 4; i++) { - testBigQueryWrite.addResponse( - WriteStream.newBuilder().setName(TEST_STREAM).setCreateTime(timestamp).build()); - } - } - - @After - public void tearDown() throws Exception { - serviceHelper.stop(); - } - - private JsonStreamWriter.Builder getTestJsonStreamWriterBuilder( - String testStream, TableSchema BQTableSchema) { - return JsonStreamWriter.newBuilder(testStream, BQTableSchema) - .setChannelProvider(channelProvider) - .setCredentialsProvider(NoCredentialsProvider.create()); - } - - @Test - public void testTwoParamNewBuilder_nullSchema() { - try { - getTestJsonStreamWriterBuilder(null, TABLE_SCHEMA); - Assert.fail("expected NullPointerException"); - } catch (NullPointerException e) { - assertEquals(e.getMessage(), "StreamOrTableName is null."); - } - } - - @Test - public void testTwoParamNewBuilder_nullStream() { - try { - getTestJsonStreamWriterBuilder(TEST_STREAM, null); - Assert.fail("expected NullPointerException"); - } catch (NullPointerException e) { - assertEquals(e.getMessage(), "TableSchema is null."); - } - } - - @Test - public void testTwoParamNewBuilder() - throws DescriptorValidationException, IOException, InterruptedException { - JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build(); - assertEquals(TEST_STREAM, writer.getStreamName()); - } - - @Test - public void testSingleAppendSimpleJson() throws Exception { - FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA) - .setTraceId("test:empty") - .build()) { - - 
testBigQueryWrite.addResponse( - AppendRowsResponse.newBuilder() - .setAppendResult( - AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) - .build()); - - ApiFuture appendFuture = writer.append(jsonArr); - assertEquals(0L, appendFuture.get().getAppendResult().getOffset().getValue()); - appendFuture.get(); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRows(0), - expectedProto.toByteString()); - assertEquals( - testBigQueryWrite.getAppendRequests().get(0).getTraceId(), "JsonWriterBeta_test:empty"); - } - } - - @Test - public void testSpecialTypeAppend() throws Exception { - TableFieldSchema field = - TableFieldSchema.newBuilder() - .setName("time") - .setType(TableFieldSchema.Type.TIME) - .setMode(TableFieldSchema.Mode.REPEATED) - .build(); - TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); - - JsonTest.TestTime expectedProto = - JsonTest.TestTime.newBuilder() - .addTime(CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(1, 0, 1))) - .build(); - JSONObject foo = new JSONObject(); - foo.put("time", new JSONArray(new String[] {"01:00:01"})); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, tableSchema).build()) { - - testBigQueryWrite.addResponse( - AppendRowsResponse.newBuilder() - .setAppendResult( - AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) - .build()); - - ApiFuture appendFuture = writer.append(jsonArr); - assertEquals(0L, appendFuture.get().getAppendResult().getOffset().getValue()); - appendFuture.get(); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - 
testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRows(0), - expectedProto.toByteString()); - } - } - - @Test - public void testSingleAppendMultipleSimpleJson() throws Exception { - FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - JSONObject foo1 = new JSONObject(); - foo1.put("foo", "allen"); - JSONObject foo2 = new JSONObject(); - foo2.put("foo", "allen"); - JSONObject foo3 = new JSONObject(); - foo3.put("foo", "allen"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - jsonArr.put(foo1); - jsonArr.put(foo2); - jsonArr.put(foo3); - - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { - testBigQueryWrite.addResponse( - AppendRowsResponse.newBuilder() - .setAppendResult( - AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) - .build()); - - ApiFuture appendFuture = writer.append(jsonArr); - - assertEquals(0L, appendFuture.get().getAppendResult().getOffset().getValue()); - appendFuture.get(); - assertEquals( - 4, - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite.getAppendRequests().get(0).getTraceId(), "JsonWriterBeta:null"); - for (int i = 0; i < 4; i++) { - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRows(i), - expectedProto.toByteString()); - } - } - } - - @Test - public void testMultipleAppendSimpleJson() throws Exception { - FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { - testBigQueryWrite.addResponse( - 
AppendRowsResponse.newBuilder() - .setAppendResult( - AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(0)).build()) - .build()); - testBigQueryWrite.addResponse( - AppendRowsResponse.newBuilder() - .setAppendResult( - AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(1)).build()) - .build()); - testBigQueryWrite.addResponse( - AppendRowsResponse.newBuilder() - .setAppendResult( - AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(2)).build()) - .build()); - testBigQueryWrite.addResponse( - AppendRowsResponse.newBuilder() - .setAppendResult( - AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(3)).build()) - .build()); - ApiFuture appendFuture; - for (int i = 0; i < 4; i++) { - appendFuture = writer.append(jsonArr); - assertEquals((long) i, appendFuture.get().getAppendResult().getOffset().getValue()); - appendFuture.get(); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(i) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(i) - .getProtoRows() - .getRows() - .getSerializedRows(0), - expectedProto.toByteString()); - } - } - } - - @Test - public void testAppendOutOfRangeException() throws Exception { - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { - testBigQueryWrite.addResponse( - AppendRowsResponse.newBuilder() - .setError(com.google.rpc.Status.newBuilder().setCode(11).build()) - .build()); - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - ApiFuture appendFuture = writer.append(jsonArr); - try { - appendFuture.get(); - Assert.fail("expected ExecutionException"); - } catch (ExecutionException ex) { - assertEquals(ex.getCause().getMessage(), "OUT_OF_RANGE: "); - } - } - } - - @Test - public void testCreateDefaultStream() throws Exception { - TableSchema tableSchema = - 
TableSchema.newBuilder().addFields(0, TEST_INT).addFields(1, TEST_STRING).build(); - try (JsonStreamWriter writer = - JsonStreamWriter.newBuilder(TEST_TABLE, tableSchema) - .setChannelProvider(channelProvider) - .setCredentialsProvider(NoCredentialsProvider.create()) - .build()) { - assertEquals("projects/p/datasets/d/tables/t/_default", writer.getStreamName()); - } - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/JsonToProtoMessageTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/JsonToProtoMessageTest.java deleted file mode 100644 index 3c6c8154c1..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/JsonToProtoMessageTest.java +++ /dev/null @@ -1,1131 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigquery.storage.v1beta2; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -import com.google.cloud.bigquery.storage.test.JsonTest.*; -import com.google.cloud.bigquery.storage.test.SchemaTest.*; -import com.google.common.collect.ImmutableMap; -import com.google.protobuf.ByteString; -import com.google.protobuf.Descriptors.Descriptor; -import com.google.protobuf.DynamicMessage; -import com.google.protobuf.Message; -import java.math.BigDecimal; -import java.time.LocalTime; -import java.util.ArrayList; -import java.util.Map; -import java.util.logging.Logger; -import org.json.JSONArray; -import org.json.JSONObject; -import org.junit.Assert; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -public class JsonToProtoMessageTest { - private static final Logger LOG = Logger.getLogger(JsonToProtoMessageTest.class.getName()); - private static ImmutableMap AllTypesToDebugMessageTest = - new ImmutableMap.Builder() - .put(BoolType.getDescriptor(), "boolean") - .put(BytesType.getDescriptor(), "bytes") - .put(Int64Type.getDescriptor(), "int64") - .put(Int32Type.getDescriptor(), "int32") - .put(DoubleType.getDescriptor(), "double") - .put(StringType.getDescriptor(), "string") - .put(RepeatedType.getDescriptor(), "array") - .put(ObjectType.getDescriptor(), "object") - .build(); - - private static ImmutableMap AllTypesToCorrectProto = - new ImmutableMap.Builder() - .put( - BoolType.getDescriptor(), - new Message[] {BoolType.newBuilder().setTestFieldType(true).build()}) - .put( - BytesType.getDescriptor(), - new Message[] { - BytesType.newBuilder().setTestFieldType(ByteString.copyFromUtf8("test")).build(), - BytesType.newBuilder() - .setTestFieldType(ByteString.copyFrom(new byte[] {1, 2, 3})) - .build() - }) - .put( - Int64Type.getDescriptor(), - new Message[] { - Int64Type.newBuilder().setTestFieldType(Long.MAX_VALUE).build(), - 
Int64Type.newBuilder().setTestFieldType(new Long(Integer.MAX_VALUE)).build() - }) - .put( - Int32Type.getDescriptor(), - new Message[] {Int32Type.newBuilder().setTestFieldType(Integer.MAX_VALUE).build()}) - .put( - DoubleType.getDescriptor(), - new Message[] {DoubleType.newBuilder().setTestFieldType(1.23).build()}) - .put( - StringType.getDescriptor(), - new Message[] {StringType.newBuilder().setTestFieldType("test").build()}) - .put( - RepeatedType.getDescriptor(), - new Message[] { - RepeatedType.newBuilder() - .addAllTestFieldType( - new ArrayList() { - { - add(1L); - add(2L); - add(3L); - } - }) - .build() - }) - .put( - ObjectType.getDescriptor(), - new Message[] { - ObjectType.newBuilder() - .setTestFieldType(ComplexLvl2.newBuilder().setTestInt(1).build()) - .build() - }) - .build(); - - private static ImmutableMap AllRepeatedTypesToDebugMessageTest = - new ImmutableMap.Builder() - .put(RepeatedBool.getDescriptor(), "boolean") - .put(RepeatedBytes.getDescriptor(), "bytes") - .put(RepeatedInt64.getDescriptor(), "int64") - .put(RepeatedInt32.getDescriptor(), "int32") - .put(RepeatedDouble.getDescriptor(), "double") - .put(RepeatedString.getDescriptor(), "string") - .put(RepeatedObject.getDescriptor(), "object") - .build(); - - private static ImmutableMap AllRepeatedTypesToCorrectProto = - new ImmutableMap.Builder() - .put( - RepeatedBool.getDescriptor(), - new Message[] { - RepeatedBool.newBuilder().addTestRepeated(true).addTestRepeated(false).build() - }) - .put( - RepeatedBytes.getDescriptor(), - new Message[] { - RepeatedBytes.newBuilder() - .addTestRepeated(ByteString.copyFrom(new byte[] {0})) - .addTestRepeated(ByteString.copyFrom(new byte[] {0, -116, -122, 71})) - .build(), - RepeatedBytes.newBuilder() - .addTestRepeated( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("0"))) - .addTestRepeated( - BigDecimalByteStringEncoder.encodeToNumericByteString( - new BigDecimal("1.2"))) - .build() - }) - .put( - 
RepeatedString.getDescriptor(), - new Message[] { - RepeatedString.newBuilder().addTestRepeated("hello").addTestRepeated("test").build() - }) - .put( - RepeatedInt64.getDescriptor(), - new Message[] { - RepeatedInt64.newBuilder() - .addTestRepeated(Long.MAX_VALUE) - .addTestRepeated(Long.MIN_VALUE) - .addTestRepeated(Integer.MAX_VALUE) - .addTestRepeated(Integer.MIN_VALUE) - .addTestRepeated(Short.MAX_VALUE) - .addTestRepeated(Short.MIN_VALUE) - .addTestRepeated(Byte.MAX_VALUE) - .addTestRepeated(Byte.MIN_VALUE) - .addTestRepeated(0) - .build(), - RepeatedInt64.newBuilder() - .addTestRepeated(Integer.MAX_VALUE) - .addTestRepeated(Integer.MIN_VALUE) - .addTestRepeated(Short.MAX_VALUE) - .addTestRepeated(Short.MIN_VALUE) - .addTestRepeated(Byte.MAX_VALUE) - .addTestRepeated(Byte.MIN_VALUE) - .addTestRepeated(0) - .build() - }) - .put( - RepeatedInt32.getDescriptor(), - new Message[] { - RepeatedInt32.newBuilder() - .addTestRepeated(Integer.MAX_VALUE) - .addTestRepeated(Integer.MIN_VALUE) - .addTestRepeated(Short.MAX_VALUE) - .addTestRepeated(Short.MIN_VALUE) - .addTestRepeated(Byte.MAX_VALUE) - .addTestRepeated(Byte.MIN_VALUE) - .addTestRepeated(0) - .build() - }) - .put( - RepeatedDouble.getDescriptor(), - new Message[] { - RepeatedDouble.newBuilder() - .addTestRepeated(Double.MAX_VALUE) - .addTestRepeated(Double.MIN_VALUE) - .addTestRepeated(Float.MAX_VALUE) - .addTestRepeated(Float.MIN_VALUE) - .build(), - RepeatedDouble.newBuilder() - .addTestRepeated(Float.MAX_VALUE) - .addTestRepeated(Float.MIN_VALUE) - .build() - }) - .put( - RepeatedObject.getDescriptor(), - new Message[] { - RepeatedObject.newBuilder() - .addTestRepeated(ComplexLvl2.newBuilder().setTestInt(1).build()) - .addTestRepeated(ComplexLvl2.newBuilder().setTestInt(2).build()) - .addTestRepeated(ComplexLvl2.newBuilder().setTestInt(3).build()) - .build() - }) - .build(); - - private static JSONObject[] simpleJSONObjects = { - new JSONObject().put("test_field_type", Long.MAX_VALUE), - new 
JSONObject().put("test_field_type", Integer.MAX_VALUE), - new JSONObject().put("test_field_type", 1.23), - new JSONObject().put("test_field_type", true), - new JSONObject().put("test_field_type", ByteString.copyFromUtf8("test")), - new JSONObject().put("test_field_type", new JSONArray("[1, 2, 3]")), - new JSONObject().put("test_field_type", new JSONObject().put("test_int", 1)), - new JSONObject().put("test_field_type", "test") - }; - - private static JSONObject[] simpleJSONArrays = { - new JSONObject() - .put( - "test_repeated", - new JSONArray( - new Long[] { - Long.MAX_VALUE, - Long.MIN_VALUE, - (long) Integer.MAX_VALUE, - (long) Integer.MIN_VALUE, - (long) Short.MAX_VALUE, - (long) Short.MIN_VALUE, - (long) Byte.MAX_VALUE, - (long) Byte.MIN_VALUE, - 0L - })), - new JSONObject() - .put( - "test_repeated", - new JSONArray( - new Integer[] { - Integer.MAX_VALUE, - Integer.MIN_VALUE, - (int) Short.MAX_VALUE, - (int) Short.MIN_VALUE, - (int) Byte.MAX_VALUE, - (int) Byte.MIN_VALUE, - 0 - })), - new JSONObject() - .put( - "test_repeated", - new JSONArray( - new Double[] { - Double.MAX_VALUE, - Double.MIN_VALUE, - (double) Float.MAX_VALUE, - (double) Float.MIN_VALUE - })), - new JSONObject() - .put("test_repeated", new JSONArray(new Float[] {Float.MAX_VALUE, Float.MIN_VALUE})), - new JSONObject().put("test_repeated", new JSONArray(new Boolean[] {true, false})), - new JSONObject().put("test_repeated", new JSONArray(new String[] {"hello", "test"})), - new JSONObject() - .put( - "test_repeated", - new JSONArray( - new byte[][] { - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("0")) - .toByteArray(), - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("1.2")) - .toByteArray() - })), - new JSONObject().put("test_repeated", new JSONArray(new int[][] {{11111, 22222}})), - new JSONObject().put("test_repeated", new JSONArray(new char[][] {{'a', 'b'}, {'c'}})), - new JSONObject().put("test_repeated", new JSONArray(new String[][] 
{{"hello"}, {"test"}})), - new JSONObject() - .put( - "test_repeated", - new JSONArray( - new JSONObject[] { - new JSONObject().put("test_int", 1), - new JSONObject().put("test_int", 2), - new JSONObject().put("test_int", 3) - })) - }; - - private final TableFieldSchema TEST_INT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INT64) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_int") - .build(); - private final TableFieldSchema TEST_STRING = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRING) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_string") - .build(); - private final TableFieldSchema TEST_BYTES = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.BYTES) - .setMode(TableFieldSchema.Mode.REQUIRED) - .setName("test_bytes") - .build(); - private final TableFieldSchema TEST_BOOL = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.BOOL) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bool") - .build(); - private final TableFieldSchema TEST_DOUBLE = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.DOUBLE) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_double") - .build(); - private final TableFieldSchema TEST_DATE = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.DATE) - .setMode(TableFieldSchema.Mode.REQUIRED) - .setName("test_date") - .build(); - private final TableFieldSchema TEST_DATETIME = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.DATETIME) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_datetime") - .build(); - private final TableFieldSchema TEST_DATETIME_STR = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.DATETIME) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_datetime_str") - .build(); - private final TableFieldSchema COMPLEXLVL2 = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRUCT) - 
.setMode(TableFieldSchema.Mode.REQUIRED) - .addFields(0, TEST_INT) - .setName("complex_lvl2") - .build(); - private final TableFieldSchema COMPLEXLVL1 = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.STRUCT) - .setMode(TableFieldSchema.Mode.REQUIRED) - .addFields(0, TEST_INT) - .addFields(1, COMPLEXLVL2) - .setName("complex_lvl1") - .build(); - private final TableFieldSchema TEST_NUMERIC = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric") - .build(); - private final TableFieldSchema TEST_NUMERIC_REPEATED = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_numeric_repeated") - .build(); - private final TableFieldSchema TEST_GEO = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.GEOGRAPHY) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_geo") - .build(); - private final TableFieldSchema TEST_TIMESTAMP = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.TIMESTAMP) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_timestamp") - .build(); - private final TableFieldSchema TEST_TIME = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.TIME) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_time") - .build(); - private final TableFieldSchema TEST_TIME_STR = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.TIME) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_time_str") - .build(); - private final TableFieldSchema TEST_NUMERIC_STR = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric_str") - .build(); - private final TableFieldSchema TEST_NUMERIC_SHORT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - 
.setName("test_numeric_short") - .build(); - private final TableFieldSchema TEST_NUMERIC_INT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric_int") - .build(); - private final TableFieldSchema TEST_NUMERIC_LONG = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric_long") - .build(); - private final TableFieldSchema TEST_NUMERIC_FLOAT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric_float") - .build(); - private final TableFieldSchema TEST_NUMERIC_DOUBLE = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric_double") - .build(); - private final TableFieldSchema TEST_BIGNUMERIC = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bignumeric") - .build(); - private final TableFieldSchema TEST_BIGNUMERIC_STR = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_bignumeric_str") - .build(); - - private final TableFieldSchema TEST_BIGNUMERIC_SHORT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bignumeric_short") - .build(); - private final TableFieldSchema TEST_BIGNUMERIC_INT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bignumeric_int") - .build(); - private final TableFieldSchema TEST_BIGNUMERIC_LONG = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bignumeric_long") - .build(); - 
private final TableFieldSchema TEST_BIGNUMERIC_FLOAT = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bignumeric_float") - .build(); - private final TableFieldSchema TEST_BIGNUMERIC_DOUBLE = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_bignumeric_double") - .build(); - private final TableFieldSchema TEST_INTERVAL = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.INTERVAL) - .setMode(TableFieldSchema.Mode.NULLABLE) - .setName("test_interval") - .build(); - private final TableFieldSchema TEST_JSON = - TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.JSON) - .setMode(TableFieldSchema.Mode.REPEATED) - .setName("test_json") - .build(); - private final TableSchema COMPLEX_TABLE_SCHEMA = - TableSchema.newBuilder() - .addFields(0, TEST_INT) - .addFields(1, TEST_STRING) - .addFields(2, TEST_BYTES) - .addFields(3, TEST_BOOL) - .addFields(4, TEST_DOUBLE) - .addFields(5, TEST_DATE) - .addFields(6, TEST_DATETIME) - .addFields(7, TEST_DATETIME_STR) - .addFields(8, COMPLEXLVL1) - .addFields(9, COMPLEXLVL2) - .addFields(10, TEST_NUMERIC) - .addFields(11, TEST_GEO) - .addFields(12, TEST_TIMESTAMP) - .addFields(13, TEST_TIME) - .addFields(14, TEST_TIME_STR) - .addFields(15, TEST_NUMERIC_REPEATED) - .addFields(16, TEST_NUMERIC_STR) - .addFields(17, TEST_NUMERIC_SHORT) - .addFields(18, TEST_NUMERIC_INT) - .addFields(19, TEST_NUMERIC_LONG) - .addFields(20, TEST_NUMERIC_FLOAT) - .addFields(21, TEST_NUMERIC_DOUBLE) - .addFields(22, TEST_BIGNUMERIC) - .addFields(23, TEST_BIGNUMERIC_STR) - .addFields(24, TEST_BIGNUMERIC_SHORT) - .addFields(25, TEST_BIGNUMERIC_INT) - .addFields(26, TEST_BIGNUMERIC_LONG) - .addFields(27, TEST_BIGNUMERIC_FLOAT) - .addFields(28, TEST_BIGNUMERIC_DOUBLE) - .addFields(29, TEST_INTERVAL) - .addFields(30, TEST_JSON) - .build(); - - @Test - public void 
testDifferentNameCasing() throws Exception { - TestInt64 expectedProto = - TestInt64.newBuilder().setByte(1).setShort(1).setInt(1).setLong(1).build(); - - JSONObject json = new JSONObject(); - json.put("bYtE", (byte) 1); - json.put("SHORT", (short) 1); - json.put("inT", 1); - json.put("lONg", 1L); - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestInt64.getDescriptor(), json); - assertEquals(expectedProto, protoMsg); - } - - @Test - public void testInt64() throws Exception { - TestInt64 expectedProto = - TestInt64.newBuilder().setByte(1).setShort(1).setInt(1).setLong(1).build(); - JSONObject json = new JSONObject(); - json.put("byte", (byte) 1); - json.put("short", (short) 1); - json.put("int", 1); - json.put("long", 1L); - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestInt64.getDescriptor(), json); - assertEquals(expectedProto, protoMsg); - } - - @Test - public void testInt32() throws Exception { - TestInt32 expectedProto = TestInt32.newBuilder().setByte(1).setShort(1).setInt(1).build(); - JSONObject json = new JSONObject(); - json.put("byte", (byte) 1); - json.put("short", (short) 1); - json.put("int", 1); - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestInt32.getDescriptor(), json); - assertEquals(expectedProto, protoMsg); - } - - @Test - public void testInt32NotMatchInt64() throws Exception { - JSONObject json = new JSONObject(); - json.put("byte", (byte) 1); - json.put("short", (short) 1); - json.put("int", 1L); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestInt32.getDescriptor(), json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertEquals("JSONObject does not have a int32 field at root.int.", e.getMessage()); - } - } - - @Test - public void testDateTimeMismatch() throws Exception { - TableFieldSchema field = - TableFieldSchema.newBuilder() - .setName("datetime") - 
.setType(TableFieldSchema.Type.DATETIME) - .setMode(TableFieldSchema.Mode.REPEATED) - .build(); - TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); - JSONObject json = new JSONObject(); - json.put("datetime", 1.0); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage( - TestDatetime.getDescriptor(), tableSchema, json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertEquals("JSONObject does not have a int64 field at root.datetime.", e.getMessage()); - } - } - - @Test - public void testTimeMismatch() throws Exception { - TableFieldSchema field = - TableFieldSchema.newBuilder() - .setName("time") - .setType(TableFieldSchema.Type.TIME) - .setMode(TableFieldSchema.Mode.REPEATED) - .build(); - TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); - JSONObject json = new JSONObject(); - json.put("time", new JSONArray(new Double[] {1.0})); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestTime.getDescriptor(), tableSchema, json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertEquals("JSONObject does not have a int64 field at root.time[0].", e.getMessage()); - } - } - - @Test - public void testMixedCasedFieldNames() throws Exception { - com.google.cloud.bigquery.storage.v1.TableFieldSchema field = - com.google.cloud.bigquery.storage.v1.TableFieldSchema.newBuilder() - .setName("fooBar") - .setType(com.google.cloud.bigquery.storage.v1.TableFieldSchema.Type.STRING) - .setMode(com.google.cloud.bigquery.storage.v1.TableFieldSchema.Mode.NULLABLE) - .build(); - com.google.cloud.bigquery.storage.v1.TableSchema tableSchema = - com.google.cloud.bigquery.storage.v1.TableSchema.newBuilder().addFields(field).build(); - - JSONObject json = new JSONObject(); - json.put("fooBar", "hello"); - - DynamicMessage protoMsg = - com.google.cloud.bigquery.storage.v1.JsonToProtoMessage.INSTANCE.convertToProtoMessage( - 
TestMixedCaseFieldNames.getDescriptor(), tableSchema, json); - } - - @Test - public void testDouble() throws Exception { - TestDouble expectedProto = TestDouble.newBuilder().setDouble(1.2).setFloat(3.4f).build(); - JSONObject json = new JSONObject(); - json.put("double", 1.2); - json.put("float", 3.4f); - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestDouble.getDescriptor(), json); - assertEquals(expectedProto, protoMsg); - } - - @Test - public void testAllTypes() throws Exception { - for (Map.Entry entry : AllTypesToDebugMessageTest.entrySet()) { - int success = 0; - for (JSONObject json : simpleJSONObjects) { - try { - LOG.info("Testing " + json + " over " + entry.getKey().getFullName()); - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(entry.getKey(), json); - LOG.info("Convert Success!"); - assertEquals(protoMsg, AllTypesToCorrectProto.get(entry.getKey())[success]); - success += 1; - } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a " + entry.getValue() + " field at root.test_field_type.", - e.getMessage()); - } - } - if (entry.getKey() == Int64Type.getDescriptor() - || entry.getKey() == BytesType.getDescriptor()) { - assertEquals(entry.getKey().getFullName(), 2, success); - } else { - assertEquals(entry.getKey().getFullName(), 1, success); - } - } - } - - @Test - public void testAllRepeatedTypesWithLimits() throws Exception { - for (Map.Entry entry : AllRepeatedTypesToDebugMessageTest.entrySet()) { - int success = 0; - for (JSONObject json : simpleJSONArrays) { - try { - LOG.info("Testing " + json + " over " + entry.getKey().getFullName()); - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(entry.getKey(), json); - LOG.info("Convert Success!"); - assertEquals( - protoMsg.toString(), - protoMsg, - AllRepeatedTypesToCorrectProto.get(entry.getKey())[success]); - success += 1; - } catch (IllegalArgumentException e) { - LOG.info(e.getMessage()); - 
assertTrue( - e.getMessage() - .equals( - "JSONObject does not have a " - + entry.getValue() - + " field at root.test_repeated[0].") - || e.getMessage() - .equals("Error: root.test_repeated[0] could not be converted to byte[].")); - } - } - if (entry.getKey() == RepeatedInt64.getDescriptor() - || entry.getKey() == RepeatedDouble.getDescriptor()) { - assertEquals(entry.getKey().getFullName(), 2, success); - } else { - assertEquals(entry.getKey().getFullName(), 1, success); - } - } - } - - @Test - public void testOptional() throws Exception { - TestInt64 expectedProto = TestInt64.newBuilder().setByte(1).build(); - JSONObject json = new JSONObject(); - json.put("byte", 1); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestInt64.getDescriptor(), json); - assertEquals(expectedProto, protoMsg); - } - - @Test - public void testRepeatedIsOptional() throws Exception { - TestRepeatedIsOptional expectedProto = - TestRepeatedIsOptional.newBuilder().setRequiredDouble(1.1).build(); - JSONObject json = new JSONObject(); - json.put("required_double", 1.1); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestRepeatedIsOptional.getDescriptor(), json); - assertEquals(expectedProto, protoMsg); - } - - @Test - public void testRequired() throws Exception { - JSONObject json = new JSONObject(); - json.put("optional_double", 1.1); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestRequired.getDescriptor(), json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have the required field root.required_double.", e.getMessage()); - } - } - - @Test - public void testStructSimple() throws Exception { - MessageType expectedProto = - MessageType.newBuilder() - .setTestFieldType(StringType.newBuilder().setTestFieldType("test").build()) - .build(); - JSONObject stringType = new JSONObject(); - stringType.put("test_field_type", "test"); - 
JSONObject json = new JSONObject(); - json.put("test_field_type", stringType); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(MessageType.getDescriptor(), json); - assertEquals(expectedProto, protoMsg); - } - - @Test - public void testStructSimpleFail() throws Exception { - JSONObject stringType = new JSONObject(); - stringType.put("test_field_type", 1); - JSONObject json = new JSONObject(); - json.put("test_field_type", stringType); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(MessageType.getDescriptor(), json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a string field at root.test_field_type.test_field_type.", - e.getMessage()); - } - } - - @Test - public void testStructComplex() throws Exception { - ComplexRoot expectedProto = - ComplexRoot.newBuilder() - .setTestInt(1) - .addTestString("a") - .addTestString("b") - .addTestString("c") - .setTestBytes(ByteString.copyFrom("hello".getBytes())) - .setTestBool(true) - .addTestDouble(1.1) - .addTestDouble(2.2) - .addTestDouble(3.3) - .addTestDouble(4.4) - .setTestDate(1) - .setTestDatetime(1) - .addTestDatetimeStr(142258614586538368L) - .addTestDatetimeStr(142258525253402624L) - .setComplexLvl1( - ComplexLvl1.newBuilder() - .setTestInt(2) - .setComplexLvl2(ComplexLvl2.newBuilder().setTestInt(3).build()) - .build()) - .setComplexLvl2(ComplexLvl2.newBuilder().setTestInt(3).build()) - .setTestNumeric( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("1.23456"))) - .setTestGeo("POINT(1,1)") - .setTestTimestamp(12345678) - .setTestTime(CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(1, 0, 1))) - .setTestTimeStr(89332507144L) - .addTestNumericRepeated( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("0"))) - .addTestNumericRepeated( - BigDecimalByteStringEncoder.encodeToNumericByteString( - new 
BigDecimal("99999999999999999999999999999.999999999"))) - .addTestNumericRepeated( - BigDecimalByteStringEncoder.encodeToNumericByteString( - new BigDecimal("-99999999999999999999999999999.999999999"))) - .setTestNumericStr( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("12.4"))) - .setTestNumericShort( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(1))) - .setTestNumericInt( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(2))) - .setTestNumericLong( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(3L))) - .setTestNumericFloat( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(4f))) - .setTestNumericDouble( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(5D))) - .setTestBignumeric( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("2.3"))) - .addTestBignumericStr( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("1.23"))) - .setTestBignumericShort( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(1))) - .setTestBignumericInt( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(2))) - .setTestBignumericLong( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(3L))) - .setTestBignumericFloat( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(4f))) - .setTestBignumericDouble( - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal(5D))) - .setTestInterval("0-0 0 0:0:0.000005") - .addTestJson("{'a':'b'}") - .build(); - JSONObject complex_lvl2 = new JSONObject(); - complex_lvl2.put("test_int", 3); - - JSONObject complex_lvl1 = new JSONObject(); - complex_lvl1.put("test_int", 2); - complex_lvl1.put("complex_lvl2", complex_lvl2); - - JSONObject json = new JSONObject(); - json.put("test_int", 1); - json.put("test_string", new JSONArray(new String[] {"a", "b", "c"})); - json.put("test_bytes", 
ByteString.copyFromUtf8("hello")); - json.put("test_bool", true); - json.put("test_DOUBLe", new JSONArray(new Double[] {1.1, 2.2, 3.3, 4.4})); - json.put("test_date", 1); - json.put("test_datetime", 1); - json.put( - "test_datetime_str", - new JSONArray(new String[] {"2021-09-27T20:51:10.752", "2021-09-27T00:00:00"})); - json.put("complex_lvl1", complex_lvl1); - json.put("complex_lvl2", complex_lvl2); - json.put( - "test_numeric", - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("1.23456"))); - json.put( - "test_numeric_repeated", - new JSONArray( - new byte[][] { - BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("0")) - .toByteArray(), - BigDecimalByteStringEncoder.encodeToNumericByteString( - new BigDecimal("99999999999999999999999999999.999999999")) - .toByteArray(), - BigDecimalByteStringEncoder.encodeToNumericByteString( - new BigDecimal("-99999999999999999999999999999.999999999")) - .toByteArray(), - })); - json.put("test_geo", "POINT(1,1)"); - json.put("test_timestamp", 12345678); - json.put( - "test_time", CivilTimeEncoder.encodePacked64TimeMicrosLocalTime(LocalTime.of(1, 0, 1))); - json.put("test_time_str", "20:51:10.1234"); - json.put("test_numeric_str", "12.4"); - json.put("test_numeric_short", 1); - json.put("test_numeric_int", 2); - json.put("test_numeric_long", 3L); - json.put("test_numeric_float", 4f); - json.put("test_numeric_double", 5D); - json.put( - "test_bignumeric", - BigDecimalByteStringEncoder.encodeToNumericByteString(BigDecimal.valueOf(2.3))); - json.put("test_bignumeric_str", new JSONArray(new String[] {"1.23"})); - json.put("test_bignumeric_short", 1); - json.put("test_bignumeric_int", 2); - json.put("test_bignumeric_long", 3L); - json.put("test_bignumeric_float", 4f); - json.put("test_bignumeric_double", 5D); - json.put("test_interval", "0-0 0 0:0:0.000005"); - json.put("test_json", new JSONArray(new String[] {"{'a':'b'}"})); - DynamicMessage protoMsg = - 
JsonToProtoMessage.convertJsonToProtoMessage( - ComplexRoot.getDescriptor(), COMPLEX_TABLE_SCHEMA, json); - assertEquals(expectedProto, protoMsg); - } - - @Test - public void testStructComplexFail() throws Exception { - JSONObject complex_lvl2 = new JSONObject(); - complex_lvl2.put("test_int", 3); - - JSONObject complex_lvl1 = new JSONObject(); - complex_lvl1.put("test_int", "not_int"); - complex_lvl1.put("complex_lvl2", complex_lvl2); - - JSONObject json = new JSONObject(); - json.put("test_int", 1); - json.put("test_string", new JSONArray(new String[] {"a", "b", "c"})); - json.put("test_bytes", ByteString.copyFromUtf8("hello")); - json.put("test_bool", true); - json.put("test_double", new JSONArray(new Double[] {1.1, 2.2, 3.3, 4.4})); - json.put("test_date", 1); - json.put("complex_lvl1", complex_lvl1); - json.put("complex_lvl2", complex_lvl2); - - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(ComplexRoot.getDescriptor(), json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a int64 field at root.complex_lvl1.test_int.", e.getMessage()); - } - } - - @Test - public void testRepeatedWithMixedTypes() throws Exception { - JSONObject json = new JSONObject(); - json.put("test_repeated", new JSONArray("[1.1, 2.2, true]")); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(RepeatedDouble.getDescriptor(), json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a double field at root.test_repeated[0].", e.getMessage()); - } - } - - @Test - public void testNestedRepeatedComplex() throws Exception { - NestedRepeated expectedProto = - NestedRepeated.newBuilder() - .addDouble(1.1) - .addDouble(2.2) - .addDouble(3.3) - .addDouble(4.4) - .addDouble(5.5) - .addInt(1) - .addInt(2) - .addInt(3) - .addInt(4) - .addInt(5) - .setRepeatedString( - RepeatedString.newBuilder() - 
.addTestRepeated("hello") - .addTestRepeated("this") - .addTestRepeated("is") - .addTestRepeated("a") - .addTestRepeated("test") - .build()) - .build(); - double[] doubleArr = {1.1, 2.2, 3.3, 4.4, 5.5}; - String[] stringArr = {"hello", "this", "is", "a", "test"}; - int[] intArr = {1, 2, 3, 4, 5}; - - JSONObject json = new JSONObject(); - json.put("double", new JSONArray(doubleArr)); - json.put("int", new JSONArray(intArr)); - JSONObject jsonRepeatedString = new JSONObject(); - jsonRepeatedString.put("test_repeated", new JSONArray(stringArr)); - json.put("repeated_string", jsonRepeatedString); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(NestedRepeated.getDescriptor(), json); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testNestedRepeatedComplexFail() throws Exception { - double[] doubleArr = {1.1, 2.2, 3.3, 4.4, 5.5}; - Boolean[] fakeStringArr = {true, false}; - int[] intArr = {1, 2, 3, 4, 5}; - - JSONObject json = new JSONObject(); - json.put("double", new JSONArray(doubleArr)); - json.put("int", new JSONArray(intArr)); - JSONObject jsonRepeatedString = new JSONObject(); - jsonRepeatedString.put("test_repeated", new JSONArray(fakeStringArr)); - json.put("repeated_string", jsonRepeatedString); - - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(NestedRepeated.getDescriptor(), json); - Assert.fail("should fail"); - } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a string field at root.repeated_string.test_repeated[0].", - e.getMessage()); - } - } - - @Test - public void testEmptySecondLevelObject() throws Exception { - ComplexLvl1 expectedProto = - ComplexLvl1.newBuilder() - .setTestInt(1) - .setComplexLvl2(ComplexLvl2.newBuilder().build()) - .build(); - JSONObject complexLvl2 = new JSONObject(); - JSONObject json = new JSONObject(); - json.put("test_int", 1); - json.put("complex_lvl2", complexLvl2); - - DynamicMessage protoMsg = - 
JsonToProtoMessage.convertJsonToProtoMessage(ComplexLvl1.getDescriptor(), json); - assertEquals(expectedProto, protoMsg); - } - - @Test - public void testAllowUnknownFieldsError() throws Exception { - JSONObject json = new JSONObject(); - json.put("test_repeated", new JSONArray(new int[] {1, 2, 3, 4, 5})); - json.put("string", "hello"); - - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(RepeatedInt64.getDescriptor(), json); - Assert.fail("Should fail"); - } catch (IllegalArgumentException e) { - assertEquals("JSONObject has fields unknown to BigQuery: root.string.", e.getMessage()); - } - } - - @Test - public void testEmptyProtoMessage() throws Exception { - JSONObject json = new JSONObject(); - json.put("test_repeated", new JSONArray(new int[0])); - - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(RepeatedInt64.getDescriptor(), json); - Assert.fail("Should fail"); - } catch (IllegalArgumentException e) { - assertEquals("The created protobuf message is empty.", e.getMessage()); - } - } - - @Test - public void testEmptyJSONObject() throws Exception { - JSONObject json = new JSONObject(); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(Int64Type.getDescriptor(), json); - Assert.fail("Should fail"); - } catch (IllegalStateException e) { - assertEquals("JSONObject is empty.", e.getMessage()); - } - } - - @Test - public void testNullJson() throws Exception { - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(Int64Type.getDescriptor(), null); - Assert.fail("Should fail"); - } catch (NullPointerException e) { - assertEquals("JSONObject is null.", e.getMessage()); - } - } - - @Test - public void testNullDescriptor() throws Exception { - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(null, new JSONObject()); - Assert.fail("Should fail"); - } catch (NullPointerException e) { - assertEquals("Protobuf 
descriptor is null.", e.getMessage()); - } - } - - @Test - public void testAllowUnknownFieldsSecondLevel() throws Exception { - JSONObject complex_lvl2 = new JSONObject(); - complex_lvl2.put("no_match", 1); - JSONObject json = new JSONObject(); - json.put("test_int", 1); - json.put("complex_lvl2", complex_lvl2); - - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(ComplexLvl1.getDescriptor(), json); - Assert.fail("Should fail"); - } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject has fields unknown to BigQuery: root.complex_lvl2.no_match.", e.getMessage()); - } - } - - @Test - public void testTopLevelMatchSecondLevelMismatch() throws Exception { - ComplexLvl1 expectedProto = - ComplexLvl1.newBuilder() - .setTestInt(1) - .setComplexLvl2(ComplexLvl2.newBuilder().build()) - .build(); - JSONObject complex_lvl2 = new JSONObject(); - JSONObject json = new JSONObject(); - json.put("test_int", 1); - json.put("complex_lvl2", complex_lvl2); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(ComplexLvl1.getDescriptor(), json); - assertEquals(expectedProto, protoMsg); - } - - @Test - public void testJsonNullValue() throws Exception { - TestInt64 expectedProto = TestInt64.newBuilder().setInt(1).build(); - JSONObject json = new JSONObject(); - json.put("long", JSONObject.NULL); - json.put("int", 1); - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestInt64.getDescriptor(), json); - assertEquals(expectedProto, protoMsg); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryRead.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryRead.java index 547ad399e1..c147e00be8 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryRead.java +++ 
b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryRead.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryReadImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryReadImpl.java index 09e6d739fe..a5cefb788f 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryReadImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryReadImpl.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWrite.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWrite.java index af7e293f66..545a0dbae9 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWrite.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWrite.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java index d2d4c881ae..04c596ff52 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaConverterTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaConverterTest.java deleted file mode 100644 index b9c9578bca..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaConverterTest.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigquery.storage.v1beta2; - -import com.google.api.gax.rpc.InvalidArgumentException; -import com.google.cloud.bigquery.storage.test.Test.*; -import com.google.protobuf.DescriptorProtos.FileDescriptorProto; -import com.google.protobuf.Descriptors; -import org.junit.*; - -public class ProtoSchemaConverterTest { - @Test - public void convertSimple() { - AllSupportedTypes testProto = AllSupportedTypes.newBuilder().setStringValue("abc").build(); - ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - Assert.assertEquals( - "name: \"com_google_cloud_bigquery_storage_test_AllSupportedTypes\"\n" - + "field {\n" - + " name: \"int32_value\"\n" - + " number: 1\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_INT32\n" - + "}\n" - + "field {\n" - + " name: \"int64_value\"\n" - + " number: 2\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_INT64\n" - + "}\n" - + "field {\n" - + " name: \"uint32_value\"\n" - + " number: 3\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_UINT32\n" - + "}\n" - + "field {\n" - + " name: \"uint64_value\"\n" - + " number: 4\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_UINT64\n" - + "}\n" - + "field {\n" - + " name: \"float_value\"\n" - + " number: 5\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_FLOAT\n" - + "}\n" - + "field {\n" - + " name: \"double_value\"\n" - + " number: 6\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_DOUBLE\n" - + "}\n" - + "field {\n" - + " name: \"bool_value\"\n" - + " number: 7\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_BOOL\n" - + "}\n" - + "field {\n" - + " name: \"enum_value\"\n" - + " number: 8\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_ENUM\n" - + " type_name: \"com_google_cloud_bigquery_storage_test_TestEnum_E.TestEnum\"\n" - + "}\n" - + "field {\n" - + " name: \"string_value\"\n" - + " number: 9\n" - + " label: LABEL_REQUIRED\n" - + " type: TYPE_STRING\n" - + "}\n" - + "nested_type {\n" - + " name: 
\"com_google_cloud_bigquery_storage_test_TestEnum_E\"\n" - + " enum_type {\n" - + " name: \"TestEnum\"\n" - + " value {\n" - + " name: \"TestEnum0\"\n" - + " number: 0\n" - + " }\n" - + " value {\n" - + " name: \"TestEnum1\"\n" - + " number: 1\n" - + " }\n" - + " }\n" - + "}\n", - protoSchema.getProtoDescriptor().toString()); - } - - @Test - public void convertNested() { - ComplicateType testProto = ComplicateType.newBuilder().build(); - ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - Assert.assertEquals( - "name: \"com_google_cloud_bigquery_storage_test_ComplicateType\"\n" - + "field {\n" - + " name: \"nested_repeated_type\"\n" - + " number: 1\n" - + " label: LABEL_REPEATED\n" - + " type: TYPE_MESSAGE\n" - + " type_name: \"com_google_cloud_bigquery_storage_test_NestedType\"\n" - + "}\n" - + "field {\n" - + " name: \"inner_type\"\n" - + " number: 2\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_MESSAGE\n" - + " type_name: \"com_google_cloud_bigquery_storage_test_InnerType\"\n" - + "}\n" - + "nested_type {\n" - + " name: \"com_google_cloud_bigquery_storage_test_InnerType\"\n" - + " field {\n" - + " name: \"value\"\n" - + " number: 1\n" - + " label: LABEL_REPEATED\n" - + " type: TYPE_STRING\n" - + " }\n" - + "}\n" - + "nested_type {\n" - + " name: \"com_google_cloud_bigquery_storage_test_NestedType\"\n" - + " field {\n" - + " name: \"inner_type\"\n" - + " number: 1\n" - + " label: LABEL_REPEATED\n" - + " type: TYPE_MESSAGE\n" - + " type_name: \"com_google_cloud_bigquery_storage_test_InnerType\"\n" - + " }\n" - + "}\n", - protoSchema.getProtoDescriptor().toString()); - } - - @Test - public void convertRecursive() { - try { - RecursiveType testProto = RecursiveType.newBuilder().build(); - ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - Assert.assertEquals( - "Recursive type is not 
supported:com.google.cloud.bigquery.storage.test.RecursiveType", - e.getMessage()); - } - } - - @Test - public void convertRecursiveTopMessage() { - try { - RecursiveTypeTopMessage testProto = RecursiveTypeTopMessage.newBuilder().build(); - ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - Assert.assertEquals( - "Recursive type is not" - + " supported:com.google.cloud.bigquery.storage.test.RecursiveTypeTopMessage", - e.getMessage()); - } - } - - @Test - public void convertDuplicateType() { - DuplicateType testProto = DuplicateType.newBuilder().build(); - ProtoSchema protoSchema = ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - - FileDescriptorProto fileDescriptorProto = - FileDescriptorProto.newBuilder() - .setName("foo.proto") - .addMessageType(protoSchema.getProtoDescriptor()) - .build(); - try { - Descriptors.FileDescriptor fs = - Descriptors.FileDescriptor.buildFrom( - fileDescriptorProto, new Descriptors.FileDescriptor[0]); - Descriptors.Descriptor type = - fs.findMessageTypeByName(protoSchema.getProtoDescriptor().getName()); - Assert.assertEquals(4, type.getFields().size()); - } catch (Descriptors.DescriptorValidationException ex) { - Assert.fail("Got unexpected exception: " + ex.getMessage()); - } - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/StreamWriterV2Test.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/StreamWriterV2Test.java deleted file mode 100644 index de541e7932..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/StreamWriterV2Test.java +++ /dev/null @@ -1,507 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1beta2; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; - -import com.google.api.core.ApiFuture; -import com.google.api.gax.core.NoCredentialsProvider; -import com.google.api.gax.grpc.testing.MockGrpcService; -import com.google.api.gax.grpc.testing.MockServiceHelper; -import com.google.api.gax.rpc.ApiException; -import com.google.api.gax.rpc.StatusCode.Code; -import com.google.cloud.bigquery.storage.test.Test.FooType; -import com.google.common.base.Strings; -import com.google.protobuf.DescriptorProtos; -import com.google.protobuf.Int64Value; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import java.io.IOException; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.logging.Logger; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.function.ThrowingRunnable; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -public class StreamWriterV2Test { - private static final Logger log = Logger.getLogger(StreamWriterV2Test.class.getName()); - private static final String TEST_STREAM = 
"projects/p/datasets/d/tables/t/streams/s"; - private static final String TEST_TRACE_ID = "DATAFLOW:job_id"; - private FakeScheduledExecutorService fakeExecutor; - private FakeBigQueryWrite testBigQueryWrite; - private static MockServiceHelper serviceHelper; - private BigQueryWriteClient client; - - @Before - public void setUp() throws Exception { - testBigQueryWrite = new FakeBigQueryWrite(); - serviceHelper = - new MockServiceHelper( - UUID.randomUUID().toString(), Arrays.asList(testBigQueryWrite)); - serviceHelper.start(); - fakeExecutor = new FakeScheduledExecutorService(); - testBigQueryWrite.setExecutor(fakeExecutor); - client = - BigQueryWriteClient.create( - BigQueryWriteSettings.newBuilder() - .setCredentialsProvider(NoCredentialsProvider.create()) - .setTransportChannelProvider(serviceHelper.createChannelProvider()) - .build()); - } - - @After - public void tearDown() throws Exception { - log.info("tearDown called"); - client.close(); - serviceHelper.stop(); - } - - private StreamWriterV2 getTestStreamWriterV2() throws IOException { - return StreamWriterV2.newBuilder(TEST_STREAM, client) - .setWriterSchema(createProtoSchema()) - .setTraceId(TEST_TRACE_ID) - .build(); - } - - private ProtoSchema createProtoSchema() { - return ProtoSchema.newBuilder() - .setProtoDescriptor( - DescriptorProtos.DescriptorProto.newBuilder() - .setName("Message") - .addField( - DescriptorProtos.FieldDescriptorProto.newBuilder() - .setName("foo") - .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING) - .setNumber(1) - .build()) - .build()) - .build(); - } - - private ProtoRows createProtoRows(String[] messages) { - ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); - for (String message : messages) { - FooType foo = FooType.newBuilder().setFoo(message).build(); - rowsBuilder.addSerializedRows(foo.toByteString()); - } - return rowsBuilder.build(); - } - - private AppendRowsResponse createAppendResponse(long offset) { - return AppendRowsResponse.newBuilder() - 
.setAppendResult( - AppendRowsResponse.AppendResult.newBuilder().setOffset(Int64Value.of(offset)).build()) - .build(); - } - - private AppendRowsResponse createAppendResponseWithError(Status.Code code, String message) { - return AppendRowsResponse.newBuilder() - .setError(com.google.rpc.Status.newBuilder().setCode(code.value()).setMessage(message)) - .build(); - } - - private ApiFuture sendTestMessage(StreamWriterV2 writer, String[] messages) { - return writer.append(createProtoRows(messages), -1); - } - - private static T assertFutureException( - Class expectedThrowable, final Future future) { - return assertThrows( - expectedThrowable, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - try { - future.get(); - } catch (ExecutionException ex) { - // Future wraps exception with ExecutionException. So unwrapper it here. - throw ex.getCause(); - } - } - }); - } - - private void verifyAppendIsBlocked(final StreamWriterV2 writer) throws Exception { - Thread appendThread = - new Thread( - new Runnable() { - @Override - public void run() { - sendTestMessage(writer, new String[] {"A"}); - } - }); - // Start a separate thread to append and verify that it is still alive after 2 seoncds. - appendThread.start(); - TimeUnit.SECONDS.sleep(2); - assertTrue(appendThread.isAlive()); - appendThread.interrupt(); - } - - private void verifyAppendRequests(long appendCount) { - assertEquals(appendCount, testBigQueryWrite.getAppendRequests().size()); - for (int i = 0; i < appendCount; i++) { - AppendRowsRequest serverRequest = testBigQueryWrite.getAppendRequests().get(i); - assertTrue(serverRequest.getProtoRows().getRows().getSerializedRowsCount() > 0); - assertEquals(i, serverRequest.getOffset().getValue()); - if (i == 0) { - // First request received by server should have schema and stream name. 
- assertTrue(serverRequest.getProtoRows().hasWriterSchema()); - assertEquals(serverRequest.getWriteStream(), TEST_STREAM); - assertEquals(serverRequest.getTraceId(), TEST_TRACE_ID); - } else { - // Following request should not have schema and stream name. - assertFalse(serverRequest.getProtoRows().hasWriterSchema()); - assertEquals(serverRequest.getWriteStream(), ""); - assertEquals(serverRequest.getTraceId(), ""); - } - } - } - - @Test - public void testBuildBigQueryWriteClientInWriter() throws Exception { - StreamWriterV2 writer = - StreamWriterV2.newBuilder(TEST_STREAM) - .setCredentialsProvider(NoCredentialsProvider.create()) - .setChannelProvider(serviceHelper.createChannelProvider()) - .setWriterSchema(createProtoSchema()) - .build(); - - testBigQueryWrite.addResponse(createAppendResponse(0)); - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); - writer.close(); - } - - @Test - public void testAppendSuccess() throws Exception { - StreamWriterV2 writer = getTestStreamWriterV2(); - - long appendCount = 100; - for (int i = 0; i < appendCount; i++) { - testBigQueryWrite.addResponse(createAppendResponse(i)); - } - - List> futures = new ArrayList<>(); - for (int i = 0; i < appendCount; i++) { - futures.add(writer.append(createProtoRows(new String[] {String.valueOf(i)}), i)); - } - - for (int i = 0; i < appendCount; i++) { - assertEquals(i, futures.get(i).get().getAppendResult().getOffset().getValue()); - } - - verifyAppendRequests(appendCount); - - writer.close(); - } - - @Test - public void testNoSchema() throws Exception { - StatusRuntimeException ex = - assertThrows( - StatusRuntimeException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - StreamWriterV2.newBuilder(TEST_STREAM, client).build(); - } - }); - assertEquals(ex.getStatus().getCode(), Status.INVALID_ARGUMENT.getCode()); - 
assertTrue(ex.getStatus().getDescription().contains("Writer schema must be provided")); - } - - @Test - public void testInvalidTraceId() throws Exception { - assertThrows( - IllegalArgumentException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - StreamWriterV2.newBuilder(TEST_STREAM).setTraceId("abc"); - } - }); - assertThrows( - IllegalArgumentException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - StreamWriterV2.newBuilder(TEST_STREAM).setTraceId("abc:"); - } - }); - assertThrows( - IllegalArgumentException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - StreamWriterV2.newBuilder(TEST_STREAM).setTraceId(":abc"); - } - }); - } - - @Test - public void testAppendSuccessAndConnectionError() throws Exception { - StreamWriterV2 writer = getTestStreamWriterV2(); - testBigQueryWrite.addResponse(createAppendResponse(0)); - testBigQueryWrite.addException(Status.INTERNAL.asException()); - - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); - - assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); - ApiException actualError = assertFutureException(ApiException.class, appendFuture2); - assertEquals(Code.INTERNAL, actualError.getStatusCode().getCode()); - - writer.close(); - } - - @Test - public void testAppendSuccessAndInStreamError() throws Exception { - StreamWriterV2 writer = getTestStreamWriterV2(); - testBigQueryWrite.addResponse(createAppendResponse(0)); - testBigQueryWrite.addResponse( - createAppendResponseWithError(Status.INVALID_ARGUMENT.getCode(), "test message")); - testBigQueryWrite.addResponse(createAppendResponse(1)); - - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); - ApiFuture appendFuture3 = sendTestMessage(writer, new String[] 
{"C"}); - - assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); - StatusRuntimeException actualError = - assertFutureException(StatusRuntimeException.class, appendFuture2); - assertEquals(Status.Code.INVALID_ARGUMENT, actualError.getStatus().getCode()); - assertEquals("test message", actualError.getStatus().getDescription()); - assertEquals(1, appendFuture3.get().getAppendResult().getOffset().getValue()); - - writer.close(); - } - - @Test - public void longIdleBetweenAppends() throws Exception { - StreamWriterV2 writer = getTestStreamWriterV2(); - testBigQueryWrite.addResponse(createAppendResponse(0)); - testBigQueryWrite.addResponse(createAppendResponse(1)); - - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); - - // Sleep to create a long idle between appends. - TimeUnit.SECONDS.sleep(3); - - ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); - assertEquals(1, appendFuture2.get().getAppendResult().getOffset().getValue()); - - writer.close(); - } - - @Test - public void testAppendAfterUserClose() throws Exception { - StreamWriterV2 writer = getTestStreamWriterV2(); - testBigQueryWrite.addResponse(createAppendResponse(0)); - - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - writer.close(); - ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); - - assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); - assertTrue(appendFuture2.isDone()); - StatusRuntimeException actualError = - assertFutureException(StatusRuntimeException.class, appendFuture2); - assertEquals(Status.Code.FAILED_PRECONDITION, actualError.getStatus().getCode()); - } - - @Test - public void testAppendAfterServerClose() throws Exception { - StreamWriterV2 writer = getTestStreamWriterV2(); - testBigQueryWrite.addException(Status.INTERNAL.asException()); - - ApiFuture appendFuture1 = 
sendTestMessage(writer, new String[] {"A"}); - ApiException error1 = assertFutureException(ApiException.class, appendFuture1); - assertEquals(Code.INTERNAL, error1.getStatusCode().getCode()); - - ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); - assertTrue(appendFuture2.isDone()); - StatusRuntimeException error2 = - assertFutureException(StatusRuntimeException.class, appendFuture2); - assertEquals(Status.Code.FAILED_PRECONDITION, error2.getStatus().getCode()); - - writer.close(); - } - - @Test - public void userCloseWhileRequestInflight() throws Exception { - final StreamWriterV2 writer = getTestStreamWriterV2(); - // Server will sleep 2 seconds before sending back the response. - testBigQueryWrite.setResponseSleep(Duration.ofSeconds(2)); - testBigQueryWrite.addResponse(createAppendResponse(0)); - - // Send a request and close the stream in separate thread while the request is inflight. - final ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - Thread closeThread = - new Thread( - new Runnable() { - @Override - public void run() { - writer.close(); - } - }); - closeThread.start(); - - // Due to the sleep on server, the append won't finish within 1 second even though stream - // is being closed. - assertThrows( - TimeoutException.class, - new ThrowingRunnable() { - @Override - public void run() throws Throwable { - appendFuture1.get(1, TimeUnit.SECONDS); - } - }); - - // Within 2 seconds, the request should be done and stream should be closed. - closeThread.join(2000); - assertTrue(appendFuture1.isDone()); - assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); - } - - @Test - public void serverCloseWhileRequestsInflight() throws Exception { - StreamWriterV2 writer = getTestStreamWriterV2(); - // Server will sleep 2 seconds before closing the connection. 
- testBigQueryWrite.setResponseSleep(Duration.ofSeconds(2)); - testBigQueryWrite.addException(Status.INTERNAL.asException()); - - // Send 10 requests, so that there are 10 inflight requests. - int appendCount = 10; - List> futures = new ArrayList<>(); - for (int i = 0; i < appendCount; i++) { - futures.add(sendTestMessage(writer, new String[] {String.valueOf(i)})); - } - - // Server close should properly handle all inflight requests. - for (int i = 0; i < appendCount; i++) { - ApiException actualError = assertFutureException(ApiException.class, futures.get(i)); - assertEquals(Code.INTERNAL, actualError.getStatusCode().getCode()); - } - - writer.close(); - } - - @Test - public void testZeroMaxInflightRequests() throws Exception { - StreamWriterV2 writer = - StreamWriterV2.newBuilder(TEST_STREAM, client) - .setWriterSchema(createProtoSchema()) - .setMaxInflightRequests(0) - .build(); - testBigQueryWrite.addResponse(createAppendResponse(0)); - verifyAppendIsBlocked(writer); - writer.close(); - } - - @Test - public void testZeroMaxInflightBytes() throws Exception { - StreamWriterV2 writer = - StreamWriterV2.newBuilder(TEST_STREAM, client) - .setWriterSchema(createProtoSchema()) - .setMaxInflightBytes(0) - .build(); - testBigQueryWrite.addResponse(createAppendResponse(0)); - verifyAppendIsBlocked(writer); - writer.close(); - } - - @Test - public void testOneMaxInflightRequests() throws Exception { - StreamWriterV2 writer = - StreamWriterV2.newBuilder(TEST_STREAM, client) - .setWriterSchema(createProtoSchema()) - .setMaxInflightRequests(1) - .build(); - // Server will sleep 1 second before every response. 
- testBigQueryWrite.setResponseSleep(Duration.ofSeconds(1)); - testBigQueryWrite.addResponse(createAppendResponse(0)); - - long appendStartTimeMs = System.currentTimeMillis(); - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - long appendElapsedMs = System.currentTimeMillis() - appendStartTimeMs; - assertTrue(appendElapsedMs >= 1000); - assertEquals(0, appendFuture1.get().getAppendResult().getOffset().getValue()); - writer.close(); - } - - @Test - public void testAppendsWithTinyMaxInflightBytes() throws Exception { - StreamWriterV2 writer = - StreamWriterV2.newBuilder(TEST_STREAM, client) - .setWriterSchema(createProtoSchema()) - .setMaxInflightBytes(1) - .build(); - // Server will sleep 100ms before every response. - testBigQueryWrite.setResponseSleep(Duration.ofMillis(100)); - long appendCount = 10; - for (int i = 0; i < appendCount; i++) { - testBigQueryWrite.addResponse(createAppendResponse(i)); - } - - List> futures = new ArrayList<>(); - long appendStartTimeMs = System.currentTimeMillis(); - for (int i = 0; i < appendCount; i++) { - futures.add(writer.append(createProtoRows(new String[] {String.valueOf(i)}), i)); - } - long appendElapsedMs = System.currentTimeMillis() - appendStartTimeMs; - assertTrue(appendElapsedMs >= 1000); - - for (int i = 0; i < appendCount; i++) { - assertEquals(i, futures.get(i).get().getAppendResult().getOffset().getValue()); - } - assertEquals(appendCount, testBigQueryWrite.getAppendRequests().size()); - for (int i = 0; i < appendCount; i++) { - assertEquals(i, testBigQueryWrite.getAppendRequests().get(i).getOffset().getValue()); - } - writer.close(); - } - - @Test - public void testMessageTooLarge() throws Exception { - StreamWriterV2 writer = getTestStreamWriterV2(); - - String oversized = Strings.repeat("a", (int) (StreamWriterV2.getApiMaxRequestBytes() + 1)); - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {oversized}); - assertTrue(appendFuture1.isDone()); - StatusRuntimeException 
actualError = - assertFutureException(StatusRuntimeException.class, appendFuture1); - assertEquals(Status.Code.INVALID_ARGUMENT, actualError.getStatus().getCode()); - assertTrue(actualError.getStatus().getDescription().contains("MessageSize is too large")); - - writer.close(); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageLongRunningTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageLongRunningTest.java index 62a0b226ec..44650a08a2 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageLongRunningTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageLongRunningTest.java @@ -16,7 +16,7 @@ package com.google.cloud.bigquery.storage.v1beta2.it; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import com.google.api.gax.rpc.ServerStream; import com.google.cloud.ServiceOptions; @@ -35,16 +35,16 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.logging.Logger; -import org.junit.AfterClass; -import org.junit.Assume; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** * Integration tests for BigQuery Storage API which target long running sessions. These tests can be * enabled by setting the system property 'bigquery.storage.enable_long_running_tests' to true. 
*/ -public class ITBigQueryStorageLongRunningTest { +class ITBigQueryStorageLongRunningTest { private static final Logger LOG = Logger.getLogger(ITBigQueryStorageLongRunningTest.class.getName()); @@ -61,9 +61,10 @@ public class ITBigQueryStorageLongRunningTest { private static BigQueryReadClient client; private static String parentProjectId; - @BeforeClass - public static void beforeClass() throws IOException { - Assume.assumeTrue(LONG_TESTS_DISABLED_MESSAGE, Boolean.getBoolean(LONG_TESTS_ENABLED_PROPERTY)); + @BeforeAll + static void beforeAll() throws IOException { + Assumptions.assumeTrue( + Boolean.getBoolean(LONG_TESTS_ENABLED_PROPERTY), LONG_TESTS_DISABLED_MESSAGE); client = BigQueryReadClient.create(); parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId()); @@ -73,15 +74,15 @@ public static void beforeClass() throws IOException { ITBigQueryStorageLongRunningTest.class.getSimpleName(), parentProjectId)); } - @AfterClass - public static void afterClass() { + @AfterAll + static void afterAll() { if (client != null) { client.close(); } } @Test - public void testLongRunningReadSession() throws InterruptedException, ExecutionException { + void testLongRunningReadSession() throws InterruptedException, ExecutionException { // This test reads a larger table with the goal of doing a simple validation of timeout settings // for a longer running session. 
@@ -101,22 +102,16 @@ public void testLongRunningReadSession() throws InterruptedException, ExecutionE /* maxStreamCount= */ 5); assertEquals( + 5, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 5, - session.getStreamsCount()); + table, session.toString())); List> tasks = new ArrayList<>(session.getStreamsCount()); for (final ReadStream stream : session.getStreamsList()) { - tasks.add( - new Callable() { - @Override - public Long call() throws Exception { - return readAllRowsFromStream(stream); - } - }); + tasks.add(() -> readAllRowsFromStream(stream)); } ExecutorService executor = Executors.newFixedThreadPool(tasks.size()); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageTest.java index ebece5944c..f48d41acbd 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/ITBigQueryStorageTest.java @@ -18,11 +18,12 @@ import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.Truth.assertWithMessage; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static 
org.junit.jupiter.api.Assertions.fail; import com.google.api.gax.core.FixedCredentialsProvider; import com.google.api.gax.rpc.ServerStream; @@ -75,6 +76,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.UUID; import java.util.logging.Logger; import org.apache.avro.Conversions; import org.apache.avro.LogicalTypes; @@ -82,12 +84,12 @@ import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericRecordBuilder; import org.apache.avro.util.Utf8; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** Integration tests for BigQuery Storage API. */ -public class ITBigQueryStorageTest { +class ITBigQueryStorageTest { private static final Logger LOG = Logger.getLogger(ITBigQueryStorageTest.class.getName()); private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); @@ -175,8 +177,8 @@ public class ITBigQueryStorageTest { + " \"universe_domain\": \"fake.domain\"\n" + "}"; - @BeforeClass - public static void beforeClass() throws IOException { + @BeforeAll + static void beforeAll() throws IOException { client = BigQueryReadClient.create(); parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId()); @@ -193,8 +195,8 @@ public static void beforeClass() throws IOException { LOG.info("Created test dataset: " + DATASET); } - @AfterClass - public static void afterClass() { + @AfterAll + static void afterAll() { if (client != null) { client.close(); } @@ -206,7 +208,7 @@ public static void afterClass() { } @Test - public void testSimpleRead() { + void testSimpleRead() { String table = BigQueryResource.FormatTableResource( /* projectId= */ "bigquery-public-data", @@ -222,12 +224,12 @@ public void testSimpleRead() { .build(), /* maxStreamCount= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did 
not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); ReadRowsRequest readRowsRequest = ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); @@ -242,9 +244,9 @@ public void testSimpleRead() { } @Test - public void testSimpleReadArrow() { + void testSimpleReadArrow() { String table = - com.google.cloud.bigquery.storage.v1.it.BigQueryResource.FormatTableResource( + com.google.cloud.bigquery.storage.v1.it.util.BigQueryResource.formatTableResource( /* projectId= */ "bigquery-public-data", /* datasetId= */ "samples", /* tableId= */ "shakespeare"); @@ -258,12 +260,12 @@ public void testSimpleReadArrow() { .build(), /* maxStreamCount= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); // Assert that there are streams available in the session. An empty table may not have // data available. If no sessions are available for an anonymous (cached) table, consider @@ -288,9 +290,9 @@ public void testSimpleReadArrow() { } @Test - public void testRangeType() throws InterruptedException { + void testRangeType() throws InterruptedException { // Create table with Range values. 
- String tableName = "test_range_type"; + String tableName = "test_range_type" + UUID.randomUUID().toString().substring(0, 8); TableId tableId = TableId.of(DATASET, tableName); QueryJobConfiguration createTable = QueryJobConfiguration.newBuilder( @@ -308,7 +310,7 @@ public void testRangeType() throws InterruptedException { bigquery.query(createTable); String table = - com.google.cloud.bigquery.storage.v1.it.BigQueryResource.FormatTableResource( + com.google.cloud.bigquery.storage.v1.it.util.BigQueryResource.formatTableResource( /* projectId= */ ServiceOptions.getDefaultProjectId(), /* datasetId= */ DATASET, /* tableId= */ tableId.getTable()); @@ -322,12 +324,12 @@ public void testRangeType() throws InterruptedException { .build(), /* maxStreamCount= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); // Assert that there are streams available in the session. An empty table may not have // data available. If no sessions are available for an anonymous (cached) table, consider @@ -351,7 +353,7 @@ public void testRangeType() throws InterruptedException { } @Test - public void testSimpleReadAndResume() { + void testSimpleReadAndResume() { String table = BigQueryResource.FormatTableResource( /* projectId= */ "bigquery-public-data", @@ -367,12 +369,12 @@ public void testSimpleReadAndResume() { .build(), /* maxStreamCount= */ 1); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); // We have to read some number of rows in order to be able to resume. 
More details: @@ -396,7 +398,7 @@ public void testSimpleReadAndResume() { } @Test - public void testFilter() throws IOException { + void testFilter() throws IOException { String table = BigQueryResource.FormatTableResource( /* projectId= */ "bigquery-public-data", @@ -420,12 +422,12 @@ public void testFilter() throws IOException { ReadSession session = client.createReadSession(request); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); ReadRowsRequest readRowsRequest = ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); @@ -455,7 +457,7 @@ public void accept(GenericData.Record record) { } @Test - public void testColumnSelection() throws IOException { + void testColumnSelection() throws IOException { String table = BigQueryResource.FormatTableResource( /* projectId= */ "bigquery-public-data", @@ -483,12 +485,12 @@ public void testColumnSelection() throws IOException { ReadSession session = client.createReadSession(request); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); ReadRowsRequest readRowsRequest = ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); @@ -498,16 +500,16 @@ public void testColumnSelection() throws IOException { String actualSchemaMessage = String.format( "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); - assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType()); - assertEquals(actualSchemaMessage, "__root__", avroSchema.getName()); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); - assertEquals(actualSchemaMessage, 2, avroSchema.getFields().size()); + assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.STRING, avroSchema.getField("word").schema().getType()); + Schema.Type.STRING, avroSchema.getField("word").schema().getType(), actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.LONG, - avroSchema.getField("word_count").schema().getType()); + avroSchema.getField("word_count").schema().getType(), + actualSchemaMessage); SimpleRowReader reader = new SimpleRowReader(avroSchema); @@ -536,7 +538,7 @@ public void accept(GenericData.Record record) { } @Test - public void testReadAtSnapshot() throws InterruptedException, IOException { + void testReadAtSnapshot() throws InterruptedException, IOException { Field intFieldSchema = Field.newBuilder("col", LegacySQLTypeName.INTEGER) .setMode(Mode.REQUIRED) @@ -593,8 +595,9 @@ public void accept(GenericData.Record record) { } @Test - public void testColumnPartitionedTableByDateField() throws InterruptedException, IOException { - String partitionedTableName = "test_column_partition_table_by_date"; + void testColumnPartitionedTableByDateField() throws InterruptedException, IOException { + String partitionedTableName = + "test_column_partition_table_by_date" + UUID.randomUUID().toString().substring(0, 8); String createTableStatement = String.format( " CREATE TABLE %s.%s (num_field INT64, date_field DATE) " @@ -619,17 +622,17 @@ public void testColumnPartitionedTableByDateField() throws InterruptedException, /* tableId= */ partitionedTableName); List unfilteredRows = 
ReadAllRows(/* table= */ table, /* filter= */ null); - assertEquals("Actual rows read: " + unfilteredRows.toString(), 3, unfilteredRows.size()); + assertEquals(3, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString()); List partitionFilteredRows = ReadAllRows(/* table= */ table, /* filter= */ "date_field = CAST(\"2019-01-02\" AS DATE)"); assertEquals( - "Actual rows read: " + partitionFilteredRows.toString(), 1, partitionFilteredRows.size()); + 1, partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString()); assertEquals(2L, partitionFilteredRows.get(0).get("num_field")); } @Test - public void testIngestionTimePartitionedTable() throws InterruptedException, IOException { + void testIngestionTimePartitionedTable() throws InterruptedException, IOException { Field intFieldSchema = Field.newBuilder("num_field", LegacySQLTypeName.INTEGER) .setMode(Mode.REQUIRED) @@ -667,18 +670,18 @@ public void testIngestionTimePartitionedTable() throws InterruptedException, IOE /* tableId= */ testTableId.getTable()); List unfilteredRows = ReadAllRows(/* table= */ table, /* filter= */ null); - assertEquals("Actual rows read: " + unfilteredRows.toString(), 2, unfilteredRows.size()); + assertEquals(2, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString()); List partitionFilteredRows = ReadAllRows(/* table= */ table, /* filter= */ "_PARTITIONDATE > \"2019-01-01\""); assertEquals( - "Actual rows read: " + partitionFilteredRows.toString(), 1, partitionFilteredRows.size()); + 1, partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString()); assertEquals(2L, partitionFilteredRows.get(0).get("num_field")); } @Test - public void testBasicSqlTypes() throws InterruptedException, IOException { - String tableName = "test_basic_sql_types"; + void testBasicSqlTypes() throws InterruptedException, IOException { + String tableName = "test_basic_sql_types" + UUID.randomUUID().toString().substring(0, 8); String 
createTableStatement = String.format( " CREATE TABLE %s.%s " @@ -710,7 +713,7 @@ public void testBasicSqlTypes() throws InterruptedException, IOException { /* tableId= */ tableName); List rows = ReadAllRows(/* table= */ table, /* filter= */ null); - assertEquals("Actual rows read: " + rows.toString(), 1, rows.size()); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); GenericData.Record record = rows.get(0); Schema avroSchema = record.getSchema(); @@ -720,22 +723,22 @@ public void testBasicSqlTypes() throws InterruptedException, IOException { "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString()); - assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType()); - assertEquals(actualSchemaMessage, "__root__", avroSchema.getName()); - assertEquals(actualSchemaMessage, 6, avroSchema.getFields().size()); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + assertEquals(6, avroSchema.getFields().size(), actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.LONG, avroSchema.getField("int_field").schema().getType()); - assertEquals(rowAssertMessage, 17L, (long) record.get("int_field")); + Schema.Type.LONG, avroSchema.getField("int_field").schema().getType(), actualSchemaMessage); + assertEquals(17L, (long) record.get("int_field"), rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.BYTES, - avroSchema.getField("num_field").schema().getType()); + avroSchema.getField("num_field").schema().getType(), + actualSchemaMessage); assertEquals( - actualSchemaMessage, LogicalTypes.decimal(/* precision= */ 38, /* scale= */ 9), - avroSchema.getField("num_field").schema().getLogicalType()); + avroSchema.getField("num_field").schema().getLogicalType(), + actualSchemaMessage); BigDecimal 
actual_num_field = new Conversions.DecimalConversion() .fromBytes( @@ -743,45 +746,46 @@ public void testBasicSqlTypes() throws InterruptedException, IOException { avroSchema, avroSchema.getField("num_field").schema().getLogicalType()); assertEquals( - rowAssertMessage, BigDecimal.valueOf(/* unscaledVal= */ 1_234_560_000_000L, /* scale= */ 9), - actual_num_field); + actual_num_field, + rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.DOUBLE, - avroSchema.getField("float_field").schema().getType()); + avroSchema.getField("float_field").schema().getType(), + actualSchemaMessage); assertEquals( - rowAssertMessage, /* expected= */ 6.547678d, /* actual= */ (double) record.get("float_field"), - /* delta= */ 0.0001); + /* delta= */ 0.0001, + rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.BOOLEAN, - avroSchema.getField("bool_field").schema().getType()); - assertEquals(rowAssertMessage, true, record.get("bool_field")); + avroSchema.getField("bool_field").schema().getType(), + actualSchemaMessage); + assertEquals(true, record.get("bool_field"), rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.STRING, - avroSchema.getField("str_field").schema().getType()); - assertEquals(rowAssertMessage, new Utf8("String field value"), record.get("str_field")); + avroSchema.getField("str_field").schema().getType(), + actualSchemaMessage); + assertEquals(new Utf8("String field value"), record.get("str_field"), rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.BYTES, - avroSchema.getField("bytes_field").schema().getType()); + avroSchema.getField("bytes_field").schema().getType(), + actualSchemaMessage); assertArrayEquals( - rowAssertMessage, Utf8.getBytesFor("абвгд"), - ((ByteBuffer) (record.get("bytes_field"))).array()); + ((ByteBuffer) (record.get("bytes_field"))).array(), + rowAssertMessage); } @Test - public void testDateAndTimeSqlTypes() throws InterruptedException, IOException { - String tableName = 
"test_date_and_time_sql_types"; + void testDateAndTimeSqlTypes() throws InterruptedException, IOException { + String tableName = + "test_date_and_time_sql_types" + UUID.randomUUID().toString().substring(0, 8); String createTableStatement = String.format( " CREATE TABLE %s.%s " @@ -809,7 +813,7 @@ public void testDateAndTimeSqlTypes() throws InterruptedException, IOException { /* tableId= */ tableName); List rows = ReadAllRows(/* table= */ table, /* filter= */ null); - assertEquals("Actual rows read: " + rows.toString(), 1, rows.size()); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); GenericData.Record record = rows.get(0); Schema avroSchema = record.getSchema(); @@ -819,56 +823,56 @@ public void testDateAndTimeSqlTypes() throws InterruptedException, IOException { "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString()); - assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType()); - assertEquals(actualSchemaMessage, "__root__", avroSchema.getName()); - assertEquals(actualSchemaMessage, 4, avroSchema.getFields().size()); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + assertEquals(4, avroSchema.getFields().size(), actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.INT, avroSchema.getField("date_field").schema().getType()); + Schema.Type.INT, avroSchema.getField("date_field").schema().getType(), actualSchemaMessage); assertEquals( - actualSchemaMessage, LogicalTypes.date(), - avroSchema.getField("date_field").schema().getLogicalType()); + avroSchema.getField("date_field").schema().getLogicalType(), + actualSchemaMessage); assertEquals( - rowAssertMessage, LocalDate.of(/* year= */ 2019, /* month= */ 5, /* dayOfMonth= */ 31), - LocalDate.ofEpochDay((int) record.get("date_field"))); + 
LocalDate.ofEpochDay((int) record.get("date_field")), + rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.STRING, - avroSchema.getField("datetime_field").schema().getType()); + avroSchema.getField("datetime_field").schema().getType(), + actualSchemaMessage); assertEquals( - actualSchemaMessage, "datetime", - avroSchema.getField("datetime_field").schema().getObjectProp("logicalType")); + avroSchema.getField("datetime_field").schema().getObjectProp("logicalType"), + actualSchemaMessage); assertEquals( - rowAssertMessage, new Utf8("2019-04-30T21:47:59.999999"), - (Utf8) record.get("datetime_field")); + (Utf8) record.get("datetime_field"), + rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.LONG, - avroSchema.getField("time_field").schema().getType()); + avroSchema.getField("time_field").schema().getType(), + actualSchemaMessage); assertEquals( - actualSchemaMessage, LogicalTypes.timeMicros(), - avroSchema.getField("time_field").schema().getLogicalType()); + avroSchema.getField("time_field").schema().getLogicalType(), + actualSchemaMessage); assertEquals( - rowAssertMessage, LocalTime.of( /* hour= */ 21, /* minute= */ 47, /* second= */ 59, /* nanoOfSecond= */ 999_999_000), - LocalTime.ofNanoOfDay(1_000L * (long) record.get("time_field"))); + LocalTime.ofNanoOfDay(1_000L * (long) record.get("time_field")), + rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.LONG, - avroSchema.getField("timestamp_field").schema().getType()); + avroSchema.getField("timestamp_field").schema().getType(), + actualSchemaMessage); assertEquals( - actualSchemaMessage, LogicalTypes.timestampMicros(), - avroSchema.getField("timestamp_field").schema().getLogicalType()); + avroSchema.getField("timestamp_field").schema().getLogicalType(), + actualSchemaMessage); ZonedDateTime expected_timestamp = ZonedDateTime.parse( "2019-04-30T19:24:19Z", DateTimeFormatter.ISO_INSTANT.withZone(ZoneOffset.UTC)) @@ -880,12 +884,12 @@ public void 
testDateAndTimeSqlTypes() throws InterruptedException, IOException { /* epochSecond= */ actual_timestamp_micros / 1_000_000, (actual_timestamp_micros % 1_000_000) * 1_000), ZoneOffset.UTC); - assertEquals(rowAssertMessage, expected_timestamp, actual_timestamp); + assertEquals(expected_timestamp, actual_timestamp, rowAssertMessage); } @Test - public void testGeographySqlType() throws InterruptedException, IOException { - String tableName = "test_geography_sql_type"; + void testGeographySqlType() throws InterruptedException, IOException { + String tableName = "test_geography_sql_type" + UUID.randomUUID().toString().substring(0, 8); String createTableStatement = String.format( " CREATE TABLE %s.%s " @@ -906,7 +910,7 @@ public void testGeographySqlType() throws InterruptedException, IOException { /* tableId= */ tableName); List rows = ReadAllRows(/* table= */ table, /* filter= */ null); - assertEquals("Actual rows read: " + rows.toString(), 1, rows.size()); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); GenericData.Record record = rows.get(0); Schema avroSchema = record.getSchema(); @@ -916,24 +920,25 @@ public void testGeographySqlType() throws InterruptedException, IOException { "Unexpected schema. 
Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString()); - assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType()); - assertEquals(actualSchemaMessage, "__root__", avroSchema.getName()); - assertEquals(actualSchemaMessage, 1, avroSchema.getFields().size()); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + assertEquals(1, avroSchema.getFields().size(), actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.STRING, - avroSchema.getField("geo_field").schema().getType()); + avroSchema.getField("geo_field").schema().getType(), + actualSchemaMessage); assertEquals( - actualSchemaMessage, "GEOGRAPHY", - avroSchema.getField("geo_field").schema().getObjectProp("sqlType")); - assertEquals(rowAssertMessage, new Utf8("POINT(1.1 2.2)"), (Utf8) record.get("geo_field")); + avroSchema.getField("geo_field").schema().getObjectProp("sqlType"), + actualSchemaMessage); + assertEquals(new Utf8("POINT(1.1 2.2)"), (Utf8) record.get("geo_field"), rowAssertMessage); } @Test - public void testStructAndArraySqlTypes() throws InterruptedException, IOException { - String tableName = "test_struct_and_array_sql_types"; + void testStructAndArraySqlTypes() throws InterruptedException, IOException { + String tableName = + "test_struct_and_array_sql_types" + UUID.randomUUID().toString().substring(0, 8); String createTableStatement = String.format( " CREATE TABLE %s.%s (array_field ARRAY, struct_field STRUCT rows = ReadAllRows(/* table= */ table, /* filter= */ null); - assertEquals("Actual rows read: " + rows.toString(), 1, rows.size()); + assertEquals(1, rows.size(), "Actual rows read: " + rows.toString()); GenericData.Record record = rows.get(0); Schema avroSchema = record.getSchema(); @@ -961,43 +966,43 @@ public void testStructAndArraySqlTypes() throws 
InterruptedException, IOExceptio "Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true)); String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString()); - assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType()); - assertEquals(actualSchemaMessage, "__root__", avroSchema.getName()); - assertEquals(actualSchemaMessage, 2, avroSchema.getFields().size()); + assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage); + assertEquals("__root__", avroSchema.getName(), actualSchemaMessage); + assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.ARRAY, - avroSchema.getField("array_field").schema().getType()); + avroSchema.getField("array_field").schema().getType(), + actualSchemaMessage); assertEquals( - actualSchemaMessage, Schema.Type.LONG, - avroSchema.getField("array_field").schema().getElementType().getType()); + avroSchema.getField("array_field").schema().getElementType().getType(), + actualSchemaMessage); assertArrayEquals( - rowAssertMessage, new Long[] {1L, 2L, 3L}, - ((GenericData.Array) record.get("array_field")).toArray(new Long[0])); + ((GenericData.Array) record.get("array_field")).toArray(new Long[0]), + rowAssertMessage); // Validate the STRUCT field and its members. 
Schema structSchema = avroSchema.getField("struct_field").schema(); - assertEquals(actualSchemaMessage, Schema.Type.RECORD, structSchema.getType()); + assertEquals(Schema.Type.RECORD, structSchema.getType(), actualSchemaMessage); GenericData.Record structRecord = (GenericData.Record) record.get("struct_field"); assertEquals( - actualSchemaMessage, Schema.Type.LONG, - structSchema.getField("int_field").schema().getType()); - assertEquals(rowAssertMessage, 10L, (long) structRecord.get("int_field")); + structSchema.getField("int_field").schema().getType(), + actualSchemaMessage); + assertEquals(10L, (long) structRecord.get("int_field"), rowAssertMessage); assertEquals( - actualSchemaMessage, Schema.Type.STRING, - structSchema.getField("str_field").schema().getType()); - assertEquals(rowAssertMessage, new Utf8("abc"), structRecord.get("str_field")); + structSchema.getField("str_field").schema().getType(), + actualSchemaMessage); + assertEquals(new Utf8("abc"), structRecord.get("str_field"), rowAssertMessage); } @Test - public void testUniverseDomainWithInvalidUniverseDomain() throws IOException { + void testUniverseDomainWithInvalidUniverseDomain() throws IOException { BigQueryReadSettings bigQueryReadSettings = BigQueryReadSettings.newBuilder() .setCredentialsProvider( @@ -1012,26 +1017,26 @@ public void testUniverseDomainWithInvalidUniverseDomain() throws IOException { /* datasetId= */ "samples", /* tableId= */ "shakespeare"); - try { - localClient.createReadSession( - /* parent= */ parentProjectId, - /* readSession= */ ReadSession.newBuilder() - .setTable(table) - .setDataFormat(DataFormat.AVRO) - .build(), - /* maxStreamCount= */ 1); - fail("RPCs to invalid universe domain should fail"); - } catch (UnauthenticatedException e) { - assertThat( - (e.getMessage() - .contains("does not match the universe domain found in the credentials"))) - .isTrue(); - } + UnauthenticatedException e = + assertThrows( + UnauthenticatedException.class, + () -> + 
localClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1)); + assertThat( + (e.getMessage() + .contains("does not match the universe domain found in the credentials"))) + .isTrue(); localClient.close(); } @Test - public void testInvalidUniverseDomainWithMismatchCredentials() throws IOException { + void testInvalidUniverseDomainWithMismatchCredentials() throws IOException { BigQueryReadSettings bigQueryReadSettings = BigQueryReadSettings.newBuilder() .setCredentialsProvider( @@ -1047,27 +1052,26 @@ public void testInvalidUniverseDomainWithMismatchCredentials() throws IOExceptio /* datasetId= */ "samples", /* tableId= */ "shakespeare"); - try { - ReadSession session = - localClient.createReadSession( - /* parent= */ parentProjectId, - /* readSession= */ ReadSession.newBuilder() - .setTable(table) - .setDataFormat(DataFormat.AVRO) - .build(), - /* maxStreamCount= */ 1); - fail("RPCs to invalid universe domain should fail"); - } catch (UnauthenticatedException e) { - assertThat( - (e.getMessage() - .contains("does not match the universe domain found in the credentials"))) - .isTrue(); - } + UnauthenticatedException e = + assertThrows( + UnauthenticatedException.class, + () -> + localClient.createReadSession( + /* parent= */ parentProjectId, + /* readSession= */ ReadSession.newBuilder() + .setTable(table) + .setDataFormat(DataFormat.AVRO) + .build(), + /* maxStreamCount= */ 1)); + assertThat( + (e.getMessage() + .contains("does not match the universe domain found in the credentials"))) + .isTrue(); localClient.close(); } @Test - public void testUniverseDomainWithMatchingDomain() throws IOException { + void testUniverseDomainWithMatchingDomain() throws IOException { // Test a valid domain using the default credentials and Google default universe domain. 
BigQueryReadSettings bigQueryReadSettings = BigQueryReadSettings.newBuilder().setUniverseDomain("googleapis.com").build(); @@ -1101,10 +1105,10 @@ public void testUniverseDomainWithMatchingDomain() throws IOException { localClient.close(); } - public void testUniverseDomain() throws IOException { + void testUniverseDomain() throws IOException { // This test is not yet part presubmit integration test as it requires the apis-tpclp.goog // universe domain credentials. - // Test a valid read session in the universe domain gdutst. + // Test a valid domain using the default credentials and Google default universe domain. BigQueryReadSettings bigQueryReadSettings = BigQueryReadSettings.newBuilder().setUniverseDomain("apis-tpclp.goog").build(); BigQueryReadClient localClient = BigQueryReadClient.create(bigQueryReadSettings); @@ -1209,12 +1213,12 @@ private void ProcessRowsAtSnapshot( ReadSession session = client.createReadSession(createSessionRequestBuilder.build()); assertEquals( + 1, + session.getStreamsCount(), String.format( "Did not receive expected number of streams for table '%s' CreateReadSession" + " response:%n%s", - table, session.toString()), - 1, - session.getStreamsCount()); + table, session.toString())); ReadRowsRequest readRowsRequest = ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build(); @@ -1288,9 +1292,9 @@ private Job RunQueryJobAndExpectSuccess(QueryJobConfiguration configuration) assertNotNull(completedJob); assertNull( + /* object= */ completedJob.getStatus().getError(), /* message= */ "Received a job status that is not a success: " - + completedJob.getStatus().toString(), - /* object= */ completedJob.getStatus().getError()); + + completedJob.getStatus().toString()); return completedJob; } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/SimpleRowReader.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/SimpleRowReader.java 
index d5061fb4f0..f655a71555 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/SimpleRowReader.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/it/SimpleRowReader.java @@ -62,7 +62,7 @@ public SimpleRowReader(Schema schema) { * @param avroRows object returned from the ReadRowsResponse. * @param rowConsumer consumer that accepts GenericRecord. */ - public void processRows(AvroRows avroRows, AvroRowConsumer rowConsumer) throws IOException { + void processRows(AvroRows avroRows, AvroRowConsumer rowConsumer) throws IOException { Preconditions.checkNotNull(avroRows); Preconditions.checkNotNull(rowConsumer); decoder = diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStubSettingsTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStubSettingsTest.java index f28b855877..779523e422 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStubSettingsTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/EnhancedBigQueryReadStubSettingsTest.java @@ -32,16 +32,13 @@ import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse; import java.time.Duration; import java.util.Set; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; -@RunWith(JUnit4.class) public class EnhancedBigQueryReadStubSettingsTest { @Test - public void testSettingsArePreserved() { + void testSettingsArePreserved() { String endpoint = "some.other.host:123"; CredentialsProvider credentialsProvider = Mockito.mock(CredentialsProvider.class); Duration watchdogInterval = Duration.ofSeconds(12); @@ -100,14 +97,14 @@ private void verifySettings( } 
@Test - public void testCreateReadSessionSettings() { + void testCreateReadSessionSettings() { UnaryCallSettings.Builder builder = EnhancedBigQueryReadStubSettings.newBuilder().createReadSessionSettings(); verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); } @Test - public void testReadRowsSettings() { + void testReadRowsSettings() { ServerStreamingCallSettings.Builder builder = EnhancedBigQueryReadStubSettings.newBuilder().readRowsSettings(); assertThat(builder.getRetryableCodes()).contains(Code.UNAVAILABLE); @@ -123,7 +120,7 @@ public void testReadRowsSettings() { } @Test - public void testSplitReadStreamSettings() { + void testSplitReadStreamSettings() { UnaryCallSettings.Builder builder = EnhancedBigQueryReadStubSettings.newBuilder().splitReadStreamSettings(); verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings()); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/ResourceHeaderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/ResourceHeaderTest.java index 1b87a2391c..5b6bf390f8 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/ResourceHeaderTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/ResourceHeaderTest.java @@ -32,15 +32,12 @@ import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest; import com.google.cloud.bigquery.storage.v1beta2.WriteStream; import java.util.regex.Pattern; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import 
org.junit.jupiter.api.Test; + public class ResourceHeaderTest { private static final String TEST_TABLE_REFERENCE = @@ -51,7 +48,7 @@ public class ResourceHeaderTest { private static final String TEST_STREAM_NAME = "streamName"; - private static final String NAME = "resource-header-test:123"; + private static final String NAME = "resource-header-test:123-v1beta2"; private static final String HEADER_NAME = "x-goog-request-params"; @@ -92,14 +89,14 @@ public class ResourceHeaderTest { private BigQueryReadClient client; private BigQueryWriteClient writeClient; - @BeforeClass + @BeforeAll public static void setUpClass() throws Exception { server = new InProcessServer<>(new BigQueryReadImplBase() {}, NAME); server.start(); } - @Before - public void setUp() throws Exception { + @BeforeEach + void setUp() throws Exception { channelProvider = LocalChannelProvider.create(NAME); BigQueryReadSettings.Builder settingsBuilder = BigQueryReadSettings.newBuilder() @@ -115,19 +112,19 @@ public void setUp() throws Exception { writeClient = BigQueryWriteClient.create(writeSettingsBuilder.build()); } - @After - public void tearDown() throws Exception { + @AfterEach + void tearDown() throws Exception { client.close(); } - @AfterClass - public static void tearDownClass() throws Exception { + @AfterAll + static void tearDownClass() throws Exception { server.stop(); server.blockUntilShutdown(); } @Test - public void createReadSessionTest() { + void createReadSessionTest() { try { client.createReadSession( "parents/project", ReadSession.newBuilder().setTable(TEST_TABLE_REFERENCE).build(), 1); @@ -138,7 +135,7 @@ public void createReadSessionTest() { } @Test - public void readRowsTest() { + void readRowsTest() { try { ReadRowsRequest request = ReadRowsRequest.newBuilder().setReadStream(TEST_STREAM_NAME).setOffset(125).build(); @@ -151,7 +148,7 @@ public void readRowsTest() { } @Test - public void splitReadStreamTest() { + void splitReadStreamTest() { try { 
client.splitReadStream(SplitReadStreamRequest.newBuilder().setName(TEST_STREAM_NAME).build()); } catch (UnimplementedException e) { @@ -162,7 +159,7 @@ public void splitReadStreamTest() { } @Test - public void createWriteStreamTest() { + void createWriteStreamTest() { try { writeClient.createWriteStream( "projects/project/datasets/dataset/tables/table", @@ -175,7 +172,7 @@ public void createWriteStreamTest() { } @Test - public void getWriteStreamTest() { + void getWriteStreamTest() { try { writeClient.getWriteStream(WRITE_STREAM_NAME); } catch (UnimplementedException e) { @@ -187,7 +184,7 @@ public void getWriteStreamTest() { // Following tests will work after b/185842996 is fixed. // @Test - // public void appendRowsTest() { + // void appendRowsTest() { // try { // AppendRowsRequest req = // AppendRowsRequest.newBuilder().setWriteStream(WRITE_STREAM_NAME).build(); @@ -202,7 +199,7 @@ public void getWriteStreamTest() { // } // // @Test - // public void appendRowsManualTest() { + // void appendRowsManualTest() { // try { // StreamWriterV2 streamWriter = // StreamWriterV2.newBuilder(WRITE_STREAM_NAME, writeClient) diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsRetryTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsRetryTest.java index bb5e4f3217..94012b8416 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsRetryTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/stub/readrows/ReadRowsRetryTest.java @@ -15,9 +15,12 @@ */ package com.google.cloud.bigquery.storage.v1beta2.stub.readrows; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + import com.google.api.gax.core.NoCredentialsProvider; -import 
com.google.api.gax.grpc.GrpcTransportChannel; -import com.google.api.gax.rpc.FixedTransportChannelProvider; +import com.google.api.gax.grpc.testing.InProcessServer; +import com.google.api.gax.grpc.testing.LocalChannelProvider; import com.google.api.gax.rpc.ServerStream; import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadClient; import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadGrpc.BigQueryReadImplBase; @@ -27,50 +30,47 @@ import com.google.common.collect.Queues; import io.grpc.Status.Code; import io.grpc.stub.StreamObserver; -import io.grpc.testing.GrpcServerRule; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Queue; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class ReadRowsRetryTest { +import java.util.UUID; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; - @Rule public GrpcServerRule serverRule = new GrpcServerRule(); +class ReadRowsRetryTest { private TestBigQueryStorageService service; private BigQueryReadClient client; + private InProcessServer server; + private LocalChannelProvider channelProvider; - @Before - public void setUp() throws IOException { + @BeforeEach + void setUp() throws Exception { service = new TestBigQueryStorageService(); - serverRule.getServiceRegistry().addService(service); + String serverName = UUID.randomUUID().toString(); + server = new InProcessServer<>(service, serverName); + server.start(); + channelProvider = LocalChannelProvider.create(serverName); BigQueryReadSettings settings = BigQueryReadSettings.newBuilder() .setCredentialsProvider(NoCredentialsProvider.create()) - .setTransportChannelProvider( - FixedTransportChannelProvider.create( - 
GrpcTransportChannel.create(serverRule.getChannel()))) + .setTransportChannelProvider(channelProvider) .build(); client = BigQueryReadClient.create(settings); } - @After - public void tearDown() throws Exception { + @AfterEach + void tearDown() throws Exception { client.close(); + server.stop(); + server.blockUntilShutdown(); } @Test - public void happyPathTest() { + void happyPathTest() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); service.expectations.add( RpcExpectation.create() @@ -78,11 +78,11 @@ public void happyPathTest() { .respondWithNumberOfRows(10) .respondWithNumberOfRows(7)); - Assert.assertEquals(17, getRowCount(request)); + assertEquals(17, getRowCount(request)); } @Test - public void immediateRetryTest() { + void immediateRetryTest() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); service.expectations.add( RpcExpectation.create() @@ -95,11 +95,11 @@ public void immediateRetryTest() { .respondWithNumberOfRows(10) .respondWithNumberOfRows(7)); - Assert.assertEquals(17, getRowCount(request)); + assertEquals(17, getRowCount(request)); } @Test - public void multipleRetryTestWithZeroInitialOffset() { + void multipleRetryTestWithZeroInitialOffset() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); service.expectations.add( RpcExpectation.create() @@ -117,11 +117,11 @@ public void multipleRetryTestWithZeroInitialOffset() { service.expectations.add( RpcExpectation.create().expectRequest("fake-stream", 22).respondWithNumberOfRows(6)); - Assert.assertEquals(28, getRowCount(request)); + assertEquals(28, getRowCount(request)); } @Test - public void multipleRetryTestWithNonZeroInitialOffset() { + void multipleRetryTestWithNonZeroInitialOffset() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 17); service.expectations.add( RpcExpectation.create() @@ -139,11 +139,11 @@ public void multipleRetryTestWithNonZeroInitialOffset() { service.expectations.add( 
RpcExpectation.create().expectRequest("fake-stream", 39).respondWithNumberOfRows(3)); - Assert.assertEquals(25, getRowCount(request)); + assertEquals(25, getRowCount(request)); } @Test - public void errorAtTheVeryEndTest() { + void errorAtTheVeryEndTest() { ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0); service.expectations.add( RpcExpectation.create() @@ -155,7 +155,7 @@ public void errorAtTheVeryEndTest() { service.expectations.add( RpcExpectation.create().expectRequest("fake-stream", 17).respondWithNumberOfRows(0)); - Assert.assertEquals(17, getRowCount(request)); + assertEquals(17, getRowCount(request)); } private int getRowCount(ReadRowsRequest request) { @@ -179,17 +179,15 @@ public void readRows( RpcExpectation expectedRpc = expectations.poll(); currentRequestIndex++; - Assert.assertNotNull( - "Unexpected request #" + currentRequestIndex + ": " + request.toString(), expectedRpc); - - Assert.assertEquals( + assertNotNull( + expectedRpc, "Unexpected request #" + currentRequestIndex + ": " + request.toString()); + assertEquals( + expectedRpc.expectedRequest, + request, "Expected request #" + currentRequestIndex + " does not match actual request: " - + request.toString(), - expectedRpc.expectedRequest, - request); - + + request.toString()); for (ReadRowsResponse response : expectedRpc.responses) { responseObserver.onNext(response); } diff --git a/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto b/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto index 618bcc0a03..d878f7bdc9 100644 --- a/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto +++ b/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto @@ -35,6 +35,8 @@ message ComplexRoot { optional bytes test_bignumeric_double = 29; optional string test_interval = 30; repeated string test_json = 31; + optional string test_timestamp_higher_precision = 32; + repeated string test_timestamp_higher_precision_repeated = 33; } message CasingComplex { @@ -157,6 
+159,18 @@ message TestTimestamp { optional int64 test_saformat = 9; } +message TestTimestampHigherPrecision { + optional string test_string = 1; + optional string test_string_t_z = 2; + optional string test_long = 3; + optional string test_int = 4; + optional string test_float = 5; + optional string test_offset = 6; + optional string test_zero_offset = 7; + optional string test_timezone = 8; + optional string test_saformat = 9; +} + message TestRepeatedTimestamp { repeated int64 test_string_repeated = 1; repeated int64 test_string_t_z_repeated = 2; @@ -169,6 +183,18 @@ message TestRepeatedTimestamp { repeated int64 test_saformat_repeated = 9; } +message TestRepeatedTimestampHigherPrecision { + repeated string test_string_repeated = 1; + repeated string test_string_t_z_repeated = 2; + repeated string test_long_repeated = 3; + repeated string test_int_repeated = 4; + repeated string test_float_repeated = 5; + repeated string test_offset_repeated = 6; + repeated string test_zero_offset_repeated = 7; + repeated string test_timezone_repeated = 8; + repeated string test_saformat_repeated = 9; +} + message TestDate { optional int32 test_string = 1; optional int32 test_long = 2; @@ -236,4 +262,4 @@ message TestRangeDatetime { message TestRangeTimestamp { optional int64 start = 1; optional int64 end = 2; -} \ No newline at end of file +} diff --git a/grpc-google-cloud-bigquerystorage-v1/pom.xml b/grpc-google-cloud-bigquerystorage-v1/pom.xml index b2992984d2..80fbbd97f4 100644 --- a/grpc-google-cloud-bigquerystorage-v1/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1 - 3.18.0 + 3.19.1 grpc-google-cloud-bigquerystorage-v1 GRPC library for grpc-google-cloud-bigquerystorage-v1 com.google.cloud google-cloud-bigquerystorage-parent - 3.18.0 + 3.19.1 diff --git a/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadGrpc.java 
b/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadGrpc.java index f778136494..9fccc968a4 100644 --- a/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadGrpc.java +++ b/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadGrpc.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java b/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java index fcbc2af218..30ec1e6364 100644 --- a/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java +++ b/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/grpc-google-cloud-bigquerystorage-v1alpha/pom.xml b/grpc-google-cloud-bigquerystorage-v1alpha/pom.xml index f9823bd91d..7a6980e6a1 100644 --- a/grpc-google-cloud-bigquerystorage-v1alpha/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1alpha/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1alpha - 3.18.0 + 3.19.1 grpc-google-cloud-bigquerystorage-v1alpha GRPC library for google-cloud-bigquerystorage com.google.cloud google-cloud-bigquerystorage-parent - 3.18.0 + 3.19.1 diff --git a/grpc-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceGrpc.java b/grpc-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceGrpc.java index f62d49acb1..f932748238 100644 --- a/grpc-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceGrpc.java +++ b/grpc-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceGrpc.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/grpc-google-cloud-bigquerystorage-v1beta/pom.xml b/grpc-google-cloud-bigquerystorage-v1beta/pom.xml index 25c779e13d..4af3d9c928 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1beta/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta - 3.18.0 + 3.19.1 grpc-google-cloud-bigquerystorage-v1beta GRPC library for google-cloud-bigquerystorage com.google.cloud google-cloud-bigquerystorage-parent - 3.18.0 + 3.19.1 diff --git a/grpc-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceGrpc.java b/grpc-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceGrpc.java index 7049c1efcb..2286f95f08 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceGrpc.java +++ b/grpc-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceGrpc.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml b/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml index 42eaa6455b..6810f8588c 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 - 0.190.0 + 0.191.1 grpc-google-cloud-bigquerystorage-v1beta1 GRPC library for grpc-google-cloud-bigquerystorage-v1beta1 com.google.cloud google-cloud-bigquerystorage-parent - 3.18.0 + 3.19.1 diff --git a/grpc-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageGrpc.java b/grpc-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageGrpc.java index eefb14ee8f..cf74d93f68 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageGrpc.java +++ b/grpc-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageGrpc.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml b/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml index 4e551959ae..0e9d698489 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta2 - 0.190.0 + 0.191.1 grpc-google-cloud-bigquerystorage-v1beta2 GRPC library for grpc-google-cloud-bigquerystorage-v1beta2 com.google.cloud google-cloud-bigquerystorage-parent - 3.18.0 + 3.19.1 diff --git a/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java b/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java index f448f93de6..ae0f0e5367 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java +++ b/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteGrpc.java b/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteGrpc.java index 32d8ef9881..62668225f7 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteGrpc.java +++ b/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteGrpc.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/pom.xml b/pom.xml index b1e0a8b6ef..a7bf8c0ac7 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ com.google.cloud google-cloud-bigquerystorage-parent pom - 3.18.0 + 3.19.1 BigQuery Storage Parent https://github.com/googleapis/java-bigquerystorage @@ -14,7 +14,7 @@ com.google.cloud sdk-platform-java-config - 3.54.1 + 3.55.1 @@ -83,57 +83,57 @@ com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta - 3.18.0 + 3.19.1 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta - 3.18.0 + 3.19.1 com.google.api.grpc proto-google-cloud-bigquerystorage-v1alpha - 3.18.0 + 3.19.1 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1alpha - 3.18.0 + 3.19.1 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 - 0.190.0 + 0.191.1 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta2 - 0.190.0 + 0.191.1 com.google.api.grpc proto-google-cloud-bigquerystorage-v1 - 3.18.0 + 3.19.1 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 - 0.190.0 + 0.191.1 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta2 - 0.190.0 + 0.191.1 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1 - 3.18.0 + 3.19.1 com.google.cloud google-cloud-bigquerystorage - 3.18.0 + 3.19.1 org.json @@ 
-142,6 +142,8 @@ + + junit junit @@ -151,7 +153,7 @@ com.google.cloud google-cloud-bigquery - 2.53.0 + 2.57.1 test @@ -185,6 +187,10 @@ org.objenesis:objenesis javax.annotation:javax.annotation-api + + org.junit.jupiter:junit-jupiter-engine + + org.junit.vintage:junit-vintage-engine diff --git a/proto-google-cloud-bigquerystorage-v1/pom.xml b/proto-google-cloud-bigquerystorage-v1/pom.xml index f82b308ee3..3638851b4a 100644 --- a/proto-google-cloud-bigquerystorage-v1/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1 - 3.18.0 + 3.19.1 proto-google-cloud-bigquerystorage-v1 PROTO library for proto-google-cloud-bigquerystorage-v1 com.google.cloud google-cloud-bigquerystorage-parent - 3.18.0 + 3.19.1 diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AnnotationsProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AnnotationsProto.java index 2d5911219d..d3d9668257 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AnnotationsProto.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AnnotationsProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java index 2972678451..8092298eba 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java index e17f00d96a..3c8ace9f65 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java index 64bafeea1e..1e450082c3 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java index ed74fb744c..fcf43e7a99 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowProto.java index a9d6fc7a0b..4fdf716dbb 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowProto.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatch.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatch.java index fb40b6f67b..3f80fecfdb 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatch.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatch.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatchOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatchOrBuilder.java index 18a00c8fc5..3adab47302 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatchOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowRecordBatchOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchema.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchema.java index 76bc465b62..a603eaef2a 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchema.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchema.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchemaOrBuilder.java index 3e2f038691..537933883d 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchemaOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSchemaOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptions.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptions.java index fdf9acac7c..7e5151528b 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptions.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptions.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptionsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptionsOrBuilder.java index 1fcd8f4b06..7223ad0e9c 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptionsOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptionsOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroProto.java index eb249acb13..fd13a61e85 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroProto.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRows.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRows.java index cf61ef8263..1b1636c8ef 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRows.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRows.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRowsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRowsOrBuilder.java index 02988d1c99..56e6c60803 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRowsOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroRowsOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchema.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchema.java index b7407beff5..bbf13a0d40 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchema.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchema.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchemaOrBuilder.java index 9ab8dea899..b8bfa619d9 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchemaOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSchemaOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptions.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptions.java index d687adb37a..8f82f91f75 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptions.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptions.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptionsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptionsOrBuilder.java index 289136efc0..71ed37c869 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptionsOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptionsOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequest.java index ba6e44f978..5e63c2c7a7 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequestOrBuilder.java index e3edbc978c..b51638a03d 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponse.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponse.java index ad6d2cf551..b7a757aeb1 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponseOrBuilder.java index 61509bd2ef..21da18b0f9 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BatchCommitWriteStreamsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java index 427a3ab891..6517fefbd2 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java index 74950685b3..488e316291 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequest.java index 31933aa03c..62c4693113 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequest.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequestOrBuilder.java index 177b4adf2c..1ae0342cd7 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateWriteStreamRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/DataFormat.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/DataFormat.java index 9a38e0ca09..44c65cab60 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/DataFormat.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/DataFormat.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequest.java index f5469083a2..0aabf054aa 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequest.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequestOrBuilder.java index b1e0f17e30..2ae4bd37c8 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponse.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponse.java index 9fc915fc29..d34445e820 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponse.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponseOrBuilder.java index 69b7aa2f29..c7b5797fb3 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FinalizeWriteStreamResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequest.java index 5989da21fd..d3fbae6f53 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequestOrBuilder.java index 70e0f5e85c..21472e5d72 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponse.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponse.java index bb2ed9cb61..dc8e02a219 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponseOrBuilder.java index 19b7d2aa05..64a56f97ce 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/FlushRowsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequest.java index 46b35ba0f1..ddc78090e3 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequest.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequestOrBuilder.java index 2fb14b9f98..9c3a062f97 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/GetWriteStreamRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProjectName.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProjectName.java index ecfae6f33e..e8e97004fe 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProjectName.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProjectName.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoBufProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoBufProto.java index ab50c8bc3e..eda883cf99 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoBufProto.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoBufProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRows.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRows.java index 75206c167d..28b0b47af4 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRows.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRows.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRowsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRowsOrBuilder.java index a8bdf59e44..6537a430f5 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRowsOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoRowsOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchema.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchema.java index f0aeb5d2b4..868f2503c8 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchema.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchema.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaOrBuilder.java index bbc5a5fe80..9e3b38259a 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ProtoSchemaOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequest.java index ba47682c3e..a17751510a 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequestOrBuilder.java index c612163a26..412dbf8cd1 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponse.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponse.java index 3e3ba4ed53..026deff8e3 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponseOrBuilder.java index e6b4b760ec..e23f9a111d 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadRowsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java index c0d279ba46..059114b948 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java index 77b3fe290e..8c90e760a1 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStream.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStream.java index b23e29c4c3..66249ea3bb 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStream.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStream.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamName.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamName.java index e30723ebae..d35aab6c7f 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamName.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamName.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamOrBuilder.java index 87ed3da2b7..1d5a670eb2 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadStreamOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowError.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowError.java index 73b6ed1ae3..70ecc0bde5 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowError.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowError.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowErrorOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowErrorOrBuilder.java index 59147ed846..59f352e455 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowErrorOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowErrorOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequest.java index 00f589e484..a5ed7619c0 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequest.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequestOrBuilder.java index f5f374ce08..efe759bfe9 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponse.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponse.java index d120edc3d0..fe1bcbff54 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponse.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponseOrBuilder.java index d625abd7e9..824e8a5ea1 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/SplitReadStreamResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageError.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageError.java index 91a314d75f..38bf9873ab 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageError.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageError.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageErrorOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageErrorOrBuilder.java index f81fc635f1..d737491c6c 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageErrorOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageErrorOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java index 5770c73073..4fa1356a95 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java index d999837dad..eeee6336b3 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStats.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStats.java index da4a41e046..67e8c63674 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStats.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStats.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStatsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStatsOrBuilder.java index d8d95fc57d..64bc8745ad 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStatsOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamStatsOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java index 21b404c06e..3f303282c4 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java index 3cbe66a9d1..7562f90396 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableName.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableName.java index 32ba4a2790..31408f73bb 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableName.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableName.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java index c17623b1a5..3983095fd0 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java index fc54c81ace..c96fb6ef45 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchemaOrBuilder.java index c91429c988..d6818e3f4a 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchemaOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchemaOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleState.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleState.java index 31ddc17119..1d721a051e 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleState.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleState.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleStateOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleStateOrBuilder.java index 3370900e71..7de7ac8112 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleStateOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ThrottleStateOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java index 722a2b0f79..7b86f80521 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamName.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamName.java index 73e80d1a57..38c642a064 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamName.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamName.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java index 1bff37c43c..fa73bd663d 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamView.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamView.java index 0acfa40599..683bf85004 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamView.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamView.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/pom.xml b/proto-google-cloud-bigquerystorage-v1alpha/pom.xml index afe8c3d3ee..057a6ff638 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1alpha/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1alpha - 3.18.0 + 3.19.1 proto-google-cloud-bigquerystorage-v1alpha Proto library for google-cloud-bigquerystorage com.google.cloud google-cloud-bigquerystorage-parent - 3.18.0 + 3.19.1 diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequest.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequest.java index 8f8de1477c..d326a5ffe7 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequestOrBuilder.java index 081ef7af67..bf0d2edfcf 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponse.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponse.java index 942252b3a2..aaf9aa1d52 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponseOrBuilder.java index 62e835a88b..8833e47564 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchCreateMetastorePartitionsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequest.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequest.java index 8f419fc9fb..e085da0346 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequestOrBuilder.java index 7b2687d392..e16d0c3a73 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchDeleteMetastorePartitionsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeError.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeError.java index 26cc33c623..4ef0e01049 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeError.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeError.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeErrorOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeErrorOrBuilder.java index 07ff7604c1..b46f09f253 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeErrorOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchSizeTooLargeErrorOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequest.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequest.java index e9a74c736f..b6a7cd10ef 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequestOrBuilder.java index cfe3985b02..bf6df560ce 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponse.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponse.java index e18af50c67..7d7b06233c 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponseOrBuilder.java index 496cc00f42..89418ed69c 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/BatchUpdateMetastorePartitionsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequest.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequest.java index e9b1b99eb2..373368a528 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequest.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequestOrBuilder.java index e2d608a8b2..8512d70a11 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/CreateMetastorePartitionRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchema.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchema.java index 7ac57a007d..ed5c2ab4f6 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchema.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchema.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchemaOrBuilder.java index 506a676f65..1a95fe04c7 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchemaOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/FieldSchemaOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequest.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequest.java index 81f8029a45..9d72cb0c21 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequestOrBuilder.java index 5b16bf22c3..481e4ab42d 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponse.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponse.java index 68fc6cff2f..10822cbf87 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponseOrBuilder.java index 010d896371..b7aefe9d0a 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ListMetastorePartitionsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartition.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartition.java index 6dffce9e2e..c66ca131ca 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartition.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartition.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionList.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionList.java index 4960576279..3bad3b0fc5 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionList.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionList.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionListOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionListOrBuilder.java index 93b26df465..82e4981e43 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionListOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionListOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionOrBuilder.java index cb49773079..de488c98ab 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionProto.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionProto.java index 58f70d0a6c..517fea249c 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionProto.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceProto.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceProto.java index 2889a44b63..02f151c3bc 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceProto.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValues.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValues.java index 2e5d9f0d69..28df0b421d 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValues.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValues.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValuesOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValuesOrBuilder.java index 84b1715206..5c3d4ae622 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValuesOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionValuesOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStream.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStream.java index c3c64e30d9..ee65e7f975 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStream.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStream.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStreamOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStreamOrBuilder.java index 6a5a12c140..64fa2ef42f 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStreamOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/ReadStreamOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfo.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfo.java index f1bfc99a4f..7546d87851 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfo.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfo.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfoOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfoOrBuilder.java index 559c2ef678..3c8573ecd2 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfoOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/SerDeInfoOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptor.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptor.java index 6169c79484..800587f1bf 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptor.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptorOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptorOrBuilder.java index fc029bb23c..491d0beea7 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptorOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StorageDescriptorOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamList.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamList.java index 0fe6c7d4fe..385ae73988 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamList.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamList.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamListOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamListOrBuilder.java index 0cac4f0dc3..00b28ac19d 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamListOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamListOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequest.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequest.java index 8996369d5c..d5005e0dd7 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequestOrBuilder.java index e2f4844591..f1d3f64e93 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponse.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponse.java index de1118e7a8..3e4101f4fb 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponseOrBuilder.java index 1e7b30a95c..dc0403ac08 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/StreamMetastorePartitionsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/TableName.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/TableName.java index 3da4378f56..ac01f24a66 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/TableName.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/TableName.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequest.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequest.java index 2f03fe858d..d4f469d8f3 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequest.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequestOrBuilder.java index 332c4dfd89..94714688c4 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1alpha/src/main/java/com/google/cloud/bigquery/storage/v1alpha/UpdateMetastorePartitionRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/pom.xml b/proto-google-cloud-bigquerystorage-v1beta/pom.xml index abaf3b1e25..4df46f4cb7 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1beta/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta - 3.18.0 + 3.19.1 proto-google-cloud-bigquerystorage-v1beta Proto library for google-cloud-bigquerystorage com.google.cloud google-cloud-bigquerystorage-parent - 3.18.0 + 3.19.1 diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequest.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequest.java index 8a2b49bb03..a4be7c32cb 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequestOrBuilder.java index 91093227a1..5b0b7805d0 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponse.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponse.java index 7c1c0c406e..87a6f46807 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponseOrBuilder.java index 9adc7872f4..553318a77c 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchCreateMetastorePartitionsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequest.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequest.java index 45ed2c8dcf..3e868c3dd9 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequestOrBuilder.java index eca14555f5..92e7ec7802 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchDeleteMetastorePartitionsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeError.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeError.java index e698b695bc..5266081a10 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeError.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeError.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeErrorOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeErrorOrBuilder.java index 9db8a90920..f5f10487fc 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeErrorOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchSizeTooLargeErrorOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequest.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequest.java index 2071a6f741..eced1f331c 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequestOrBuilder.java index ef45510336..ec207dfaf5 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponse.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponse.java index 5a865193db..307a08dd8d 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponseOrBuilder.java index 029af53c66..4f21f0e2c1 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/BatchUpdateMetastorePartitionsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequest.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequest.java index 8ebf211442..e9c8512013 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequestOrBuilder.java index 176549799e..7c10fb7c50 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/CreateMetastorePartitionRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchema.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchema.java index 396c6dfeeb..b30839e145 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchema.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchema.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchemaOrBuilder.java index 07ebe1fe29..592e3d4e83 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchemaOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/FieldSchemaOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequest.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequest.java index 4a0aa6cce5..9ad077c6dc 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequestOrBuilder.java index 504cfa2ef7..b2a8e541e9 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponse.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponse.java index e5ac75f884..d45229ad27 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponseOrBuilder.java index bde6487a9c..5ae8a0a9ef 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ListMetastorePartitionsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartition.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartition.java index fc86387ecd..88813249d2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartition.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartition.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionList.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionList.java index b5bc1426f4..2ff6ba3def 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionList.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionList.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionListOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionListOrBuilder.java index bd6ac3e19c..f05565bf42 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionListOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionListOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionOrBuilder.java index 0c2c8bbf79..e3d1ae03c0 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionProto.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionProto.java index 612892fc80..c115d6b1c2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceProto.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceProto.java index 8862c414c3..042afd2029 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValues.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValues.java index f7efc0dab3..75cde00998 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValues.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValues.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValuesOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValuesOrBuilder.java index d458d6d244..a81b26c4b0 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValuesOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionValuesOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStream.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStream.java index 9fbbae301c..16764bcbbb 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStream.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStream.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStreamOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStreamOrBuilder.java index 54e313235c..7bd58931f0 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStreamOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/ReadStreamOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfo.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfo.java index 5bace4d206..3b4d955045 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfo.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfo.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfoOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfoOrBuilder.java index c0c28e9996..dfc9ffa34f 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfoOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/SerDeInfoOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptor.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptor.java index 7d4cbbef66..ed4a0b30b4 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptor.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptor.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptorOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptorOrBuilder.java index d155d19137..f76b8548d2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptorOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StorageDescriptorOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamList.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamList.java index 06ae0d6882..c74861ff06 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamList.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamList.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamListOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamListOrBuilder.java index 107579c54f..65310bfeea 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamListOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamListOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequest.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequest.java index 7ae6b78392..0202706916 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequestOrBuilder.java index a5563a1a1b..5cdf176e1f 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponse.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponse.java index ebdd5f8c99..27708ebee6 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponseOrBuilder.java index 15054877fc..99fc1a3496 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/StreamMetastorePartitionsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/TableName.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/TableName.java index 21bbdcd632..2af3750208 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/TableName.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/TableName.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequest.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequest.java index 3dfa67f6c7..09621576b3 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequestOrBuilder.java index aa9dc729ac..95fe6effb2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta/src/main/java/com/google/cloud/bigquery/storage/v1beta/UpdateMetastorePartitionRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta1/pom.xml b/proto-google-cloud-bigquerystorage-v1beta1/pom.xml index 6c646a7948..962b23b148 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1beta1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 - 0.190.0 + 0.191.1 proto-google-cloud-bigquerystorage-v1beta1 PROTO library for proto-google-cloud-bigquerystorage-v1beta1 com.google.cloud google-cloud-bigquerystorage-parent - 3.18.0 + 3.19.1 diff --git a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ArrowProto.java b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ArrowProto.java index f8873da612..bfea7beff3 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ArrowProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ArrowProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/AvroProto.java b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/AvroProto.java index f955d2eb75..fd8163fc50 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/AvroProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/AvroProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ProjectName.java b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ProjectName.java index f8b843db35..0d6ace7612 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ProjectName.java +++ b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ProjectName.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java index b23cd8c06c..ca44ba83f0 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java +++ b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java index 45055b389f..b7aef33c92 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java +++ b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/TableReferenceProto.java b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/TableReferenceProto.java index 0a46dd160b..d5930ba36c 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/TableReferenceProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/TableReferenceProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/pom.xml b/proto-google-cloud-bigquerystorage-v1beta2/pom.xml index e89263e152..12b8092dc4 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1beta2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta2 - 0.190.0 + 0.191.1 proto-google-cloud-bigquerystorage-v1beta2 PROTO library for proto-google-cloud-bigquerystorage-v1beta2 com.google.cloud google-cloud-bigquerystorage-parent - 3.18.0 + 3.19.1 diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequest.java index 4fda3d0c92..fb1fc584c4 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 
2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequestOrBuilder.java index c7d7f8990f..059bc6bad6 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponse.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponse.java index c30996a133..0057168f62 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponseOrBuilder.java index 32ff23b207..0d3a3d13a0 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowProto.java index 2c73cc8bf0..9198266a5d 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatch.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatch.java index 66c247186a..13f037b6e2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatch.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatch.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatchOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatchOrBuilder.java index 92a4d1dc55..229367f108 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatchOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatchOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchema.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchema.java index 5df4ecf9e9..c1650c52ed 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchema.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchema.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchemaOrBuilder.java index af6122e24c..102058e3fb 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchemaOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchemaOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptions.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptions.java index 4450e10cb2..46ef400911 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptions.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptions.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptionsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptionsOrBuilder.java index 26403c0dd8..3e018284c3 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptionsOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSerializationOptionsOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroProto.java index 2bf3a749d2..6f65817647 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRows.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRows.java index cc49f1807c..e10ede3159 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRows.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRows.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRowsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRowsOrBuilder.java index c64c9d0d25..e9adfb1836 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRowsOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRowsOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchema.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchema.java index 1635de4a03..66185a3161 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchema.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchema.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchemaOrBuilder.java index 75d5956c3b..2dbad88961 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchemaOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchemaOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequest.java index 63cd6b9f52..a7cea1c2b9 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequestOrBuilder.java index bd288b9b50..fe6fd6f7ed 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponse.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponse.java index b6643e1d66..5c01de753e 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponseOrBuilder.java index 75b9e5a025..bccca201ab 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequest.java index 1f945a9b5b..0c732d98ea 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequestOrBuilder.java index 6556999040..12dc423dad 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequest.java index 3a58a80902..eeb313d5df 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequestOrBuilder.java index a11dd9d663..ab614acd01 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateWriteStreamRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/DataFormat.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/DataFormat.java index 737a38f69b..b48dcf7dc2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/DataFormat.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/DataFormat.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequest.java index 44a5d166e5..8b7ba72571 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequestOrBuilder.java index 9758fcda5b..af8da877cd 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponse.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponse.java index 9d5a11e8ec..8f93ccbb53 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponseOrBuilder.java index ed85de4ad1..1759cde84b 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FinalizeWriteStreamResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequest.java index 7c4c9f8e90..df6659b47c 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequestOrBuilder.java index e9f5f096ca..b958b669a5 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponse.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponse.java index 99f9f8bcd7..e3d83a223a 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponseOrBuilder.java index ccfcd06515..78c8f92fc3 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/FlushRowsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequest.java index 9ccef51f5a..8524cdacb5 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequestOrBuilder.java index d3ce9bec11..7147dedbc0 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/GetWriteStreamRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProjectName.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProjectName.java index ceb20430e8..a3e66be4b2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProjectName.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProjectName.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoBufProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoBufProto.java index a7410989fd..e0dd0c0ce4 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoBufProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoBufProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRows.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRows.java index 9d617e9bca..4e3e6e7aa5 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRows.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRows.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRowsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRowsOrBuilder.java index 45c4ec1a5d..93f5abf169 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRowsOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRowsOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchema.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchema.java index 37d4b5eb83..f819699424 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchema.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchema.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaOrBuilder.java index db617cf189..082c055bbd 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchemaOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequest.java index 938a1d1841..2c3651f756 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequestOrBuilder.java index 67b74eb578..a77a3b1920 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java index 44c1f31151..20f56e5626 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java index 6fd935d994..f313414e5a 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java index 004364a72c..325967406d 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSessionOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSessionOrBuilder.java index 45364e3db7..04b5e1daab 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSessionOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSessionOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStream.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStream.java index 25f7dd4f7c..8d8cbbbe2a 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStream.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStream.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamName.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamName.java index cd43eddbcb..32e330887b 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamName.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamName.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamOrBuilder.java index b89baecc96..74e71d9450 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequest.java index 3799f9af88..b60d4b08b2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequest.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequestOrBuilder.java index 59043660fb..a881cdb02f 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequestOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponse.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponse.java index cad628da52..ba85c6dd69 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponse.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponseOrBuilder.java index 090ca9903a..de7ccf31a3 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponseOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageError.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageError.java index 99b27e2165..033e2e463e 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageError.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageError.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageErrorOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageErrorOrBuilder.java index 8fb65130dc..8a0ae28075 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageErrorOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageErrorOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java index 01224df977..4c33c8d747 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java index 55e5a2ca00..802689d3f2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStats.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStats.java index d1dbc3e2a3..da9c2978e2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStats.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStats.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStatsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStatsOrBuilder.java index db94cf3838..25b70e48f9 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStatsOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStatsOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchema.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchema.java index 01619cb87b..dc3f105c00 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchema.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchema.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchemaOrBuilder.java index 776ee74fa2..82429015a2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchemaOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchemaOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableName.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableName.java index ded66ab2a4..84c1c7f92e 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableName.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableName.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableProto.java index 13a884dcd2..50f020e376 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchema.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchema.java index a9a5661972..3d94059bbd 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchema.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchema.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchemaOrBuilder.java index 63024282b0..365e47765d 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchemaOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableSchemaOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleState.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleState.java index 56fe548b94..0dade90344 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleState.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleState.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleStateOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleStateOrBuilder.java index d3dfd51575..68f0ac8149 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleStateOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleStateOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStream.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStream.java index 261032adb1..379c5e3944 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStream.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStream.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamName.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamName.java index fd571a9531..3cce937c8a 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamName.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamName.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamOrBuilder.java index 22b26591ad..5249ae1336 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/WriteStreamOrBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright 2025 Google LLC + * Copyright 2026 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/renovate.json b/renovate.json index 598f2acf29..1a342849b8 100644 --- a/renovate.json +++ b/renovate.json @@ -1,4 +1,6 @@ { + "prConcurrentLimit": 0, + "prHourlyLimit": 0, "extends": [ ":separateMajorReleases", ":combinePatchMinorReleases", diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml index 8078f374fb..c7e67157bd 100644 --- a/samples/install-without-bom/pom.xml +++ b/samples/install-without-bom/pom.xml @@ -39,7 +39,7 @@ com.google.cloud google-cloud-bigquery - 2.53.0 + 2.57.1 org.apache.avro diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index 9be30a165a..2e61c8190c 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ -30,14 +30,14 @@ com.google.cloud google-cloud-bigquerystorage - 3.18.0 + 3.19.1 com.google.cloud google-cloud-bigquery - 2.53.0 + 2.57.1 org.apache.avro diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml index de49df355e..9d1318f911 100644 --- a/samples/snippets/pom.xml +++ b/samples/snippets/pom.xml @@ -57,7 +57,7 @@ com.google.cloud google-cloud-bigquery - 2.53.0 + 2.57.1 org.apache.avro diff --git a/samples/snippets/src/main/java/com/example/bigquerystorage/ExportOpenTelemetry.java b/samples/snippets/src/main/java/com/example/bigquerystorage/ExportOpenTelemetry.java index 08604d4d9d..ea06c76e17 100644 --- a/samples/snippets/src/main/java/com/example/bigquerystorage/ExportOpenTelemetry.java +++ b/samples/snippets/src/main/java/com/example/bigquerystorage/ExportOpenTelemetry.java @@ -105,11 +105,11 @@ public static void exportToOpenTelemetry(String projectId, String datasetName, S // Final cleanup for the stream during worker teardown. 
writer.cleanup(); - verifyExpectedRowCount(parentTable, 12); + verifyExpectedRowCount(parentTable, 12L); System.out.println("Appended records successfully."); } - private static void verifyExpectedRowCount(TableName parentTable, int expectedRowCount) + private static void verifyExpectedRowCount(TableName parentTable, long expectedRowCount) throws InterruptedException { String queryRowCount = "SELECT COUNT(*) FROM `" @@ -122,8 +122,8 @@ private static void verifyExpectedRowCount(TableName parentTable, int expectedRo QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(queryRowCount).build(); BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService(); TableResult results = bigquery.query(queryConfig); - int countRowsActual = - Integer.parseInt(results.getValues().iterator().next().get("f0_").getStringValue()); + long countRowsActual = + Long.parseLong(results.getValues().iterator().next().get("f0_").getStringValue()); if (countRowsActual != expectedRowCount) { throw new RuntimeException( "Unexpected row count. Expected: " + expectedRowCount + ". 
Actual: " + countRowsActual); @@ -175,7 +175,7 @@ private JsonStreamWriter createStreamWriter(String tableName) // For more information about JsonStreamWriter, see: // https://googleapis.dev/java/google-cloud-bigquerystorage/latest/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.html return JsonStreamWriter.newBuilder(tableName, client) - .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(100))) + .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(10))) .setChannelProvider( BigQueryWriteSettings.defaultGrpcTransportProviderBuilder() .setKeepAliveTime(org.threeten.bp.Duration.ofMinutes(1)) diff --git a/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampArrow.java b/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampArrow.java new file mode 100644 index 0000000000..829bbb31e9 --- /dev/null +++ b/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampArrow.java @@ -0,0 +1,185 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_read_timestamp_arrow] +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.bigquery.storage.v1.ArrowRecordBatch; +import com.google.cloud.bigquery.storage.v1.ArrowSchema; +import com.google.cloud.bigquery.storage.v1.BigQueryReadClient; +import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1.DataFormat; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1.ReadSession; +import com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers; +import com.google.common.base.Preconditions; +import com.google.protobuf.Timestamp; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.FieldVector; +import org.apache.arrow.vector.VectorLoader; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.arrow.vector.ipc.ReadChannel; +import org.apache.arrow.vector.ipc.message.MessageSerializer; +import org.apache.arrow.vector.types.pojo.Field; +import org.apache.arrow.vector.types.pojo.Schema; +import org.apache.arrow.vector.util.ByteArrayReadableSeekableByteChannel; + +/** + * Depending on the JDK version, you may need to include this into your VM options: {@code + * --add-opens=java.base/java.nio=org.apache.arrow.memory.core,ALL-UNNAMED} + * + *

See the documentation for + * more information. + */ +public class ReadTimestampArrow { + /* + * SimpleRowReader handles deserialization of the Apache Arrow-encoded row batches transmitted + * from the storage API using a generic datum decoder. + */ + private static class SimpleRowReader implements AutoCloseable { + + BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE); + + // Decoder object will be reused to avoid re-allocation and too much garbage collection. + private final VectorSchemaRoot root; + private final VectorLoader loader; + + public SimpleRowReader(ArrowSchema arrowSchema) throws IOException { + Schema schema = + MessageSerializer.deserializeSchema( + new ReadChannel( + new ByteArrayReadableSeekableByteChannel( + arrowSchema.getSerializedSchema().toByteArray()))); + Preconditions.checkNotNull(schema); + List vectors = new ArrayList<>(); + for (Field field : schema.getFields()) { + vectors.add(field.createVector(allocator)); + } + root = new VectorSchemaRoot(vectors); + loader = new VectorLoader(root); + } + + /** + * Sample method for processing Arrow data which only validates decoding. + * + * @param batch object returned from the ReadRowsResponse. + */ + public void processRows(ArrowRecordBatch batch) throws IOException { + org.apache.arrow.vector.ipc.message.ArrowRecordBatch deserializedBatch = + MessageSerializer.deserializeRecordBatch( + new ReadChannel( + new ByteArrayReadableSeekableByteChannel( + batch.getSerializedRecordBatch().toByteArray())), + allocator); + + loader.load(deserializedBatch); + // Release buffers from batch (they are still held in the vectors in root). + deserializedBatch.close(); + System.out.println(root.contentToTSVString()); + // Release buffers from vectors in root. + root.clear(); + } + + @Override + public void close() { + root.close(); + allocator.close(); + } + } + + public static void main(String... args) throws Exception { + // Sets your Google Cloud Platform project ID. 
+ String projectId = args[0]; + Long snapshotMillis = null; + if (args.length > 1) { + snapshotMillis = Long.parseLong(args[1]); + } + + try (BigQueryReadClient client = BigQueryReadClient.create()) { + String parent = String.format("projects/%s", projectId); + + // This example uses citibike data from the public datasets. + String srcTable = + String.format( + "projects/%s/datasets/%s/tables/%s", + "bigquery-public-data", "new_york_citibike", "citibike_stations"); + + // We specify the columns to be projected by adding them to the selected fields, + ReadSession.TableReadOptions options = + ReadSession.TableReadOptions.newBuilder().addSelectedFields("last_reported").build(); + + // Start specifying the read session we want created. + ReadSession.Builder sessionBuilder = + ReadSession.newBuilder() + .setTable(srcTable) + // This API can also deliver data serialized in Apache Avro format. + // This example leverages Apache Arrow. + .setDataFormat(DataFormat.ARROW) + .setReadOptions(options); + + // Optionally specify the snapshot time. When unspecified, snapshot time is "now". + if (snapshotMillis != null) { + Timestamp t = + Timestamp.newBuilder() + .setSeconds(snapshotMillis / 1000) + .setNanos((int) ((snapshotMillis % 1000) * 1000000)) + .build(); + TableModifiers modifiers = TableModifiers.newBuilder().setSnapshotTime(t).build(); + sessionBuilder.setTableModifiers(modifiers); + } + + // Begin building the session creation request. + CreateReadSessionRequest.Builder builder = + CreateReadSessionRequest.newBuilder() + .setParent(parent) + .setReadSession(sessionBuilder) + .setMaxStreamCount(1); + + ReadSession session = client.createReadSession(builder.build()); + // Setup a simple reader and start a read session. + try (ReadTimestampArrow.SimpleRowReader reader = + new ReadTimestampArrow.SimpleRowReader(session.getArrowSchema())) { + + // Assert that there are streams available in the session. An empty table may not have + // data available. 
If no sessions are available for an anonymous (cached) table, consider + // writing results of a query to a named table rather than consuming cached results + // directly. + Preconditions.checkState(session.getStreamsCount() > 0); + + // Use the first stream to perform reading. + String streamName = session.getStreams(0).getName(); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(streamName).build(); + + // Process each block of rows as they arrive and decode using our simple row reader. + ServerStream stream = client.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + Preconditions.checkState(response.hasArrowRecordBatch()); + reader.processRows(response.getArrowRecordBatch()); + } + } + } + } +} +// [END bigquerystorage_read_timestamp_arrow] diff --git a/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampAvro.java b/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampAvro.java new file mode 100644 index 0000000000..6343c7739f --- /dev/null +++ b/samples/snippets/src/main/java/com/example/bigquerystorage/ReadTimestampAvro.java @@ -0,0 +1,151 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_read_timestamp_avro] +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.bigquery.storage.v1.AvroRows; +import com.google.cloud.bigquery.storage.v1.BigQueryReadClient; +import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1.DataFormat; +import com.google.cloud.bigquery.storage.v1.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1.ReadSession; +import com.google.cloud.bigquery.storage.v1.ReadSession.TableModifiers; +import com.google.common.base.Preconditions; +import com.google.protobuf.Timestamp; +import java.io.IOException; +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.BinaryDecoder; +import org.apache.avro.io.DatumReader; +import org.apache.avro.io.DecoderFactory; + +public class ReadTimestampAvro { + /* + * SimpleRowReader handles deserialization of the Avro-encoded row blocks transmitted + * from the storage API using a generic datum decoder. + */ + private static class SimpleRowReader { + + private final DatumReader datumReader; + + // Decoder object will be reused to avoid re-allocation and too much garbage collection. + private BinaryDecoder decoder = null; + + // GenericRecord object will be reused. + private GenericRecord row = null; + + public SimpleRowReader(Schema schema) { + Preconditions.checkNotNull(schema); + datumReader = new GenericDatumReader<>(schema); + } + + /** + * Sample method for processing AVRO rows which only validates decoding. + * + * @param avroRows object returned from the ReadRowsResponse. 
+ */ + public void processRows(AvroRows avroRows) throws IOException { + decoder = + DecoderFactory.get() + .binaryDecoder(avroRows.getSerializedBinaryRows().toByteArray(), decoder); + + while (!decoder.isEnd()) { + // Reusing object row + row = datumReader.read(row, decoder); + System.out.println(row.toString()); + } + } + } + + public static void main(String... args) throws Exception { + // Sets your Google Cloud Platform project ID. + String projectId = args[0]; + Long snapshotMillis = null; + if (args.length > 1) { + snapshotMillis = Long.parseLong(args[1]); + } + + try (BigQueryReadClient client = BigQueryReadClient.create()) { + String parent = String.format("projects/%s", projectId); + + // This example uses citibike data from the public datasets. + String srcTable = + String.format( + "projects/%s/datasets/%s/tables/%s", + "bigquery-public-data", "new_york_citibike", "citibike_stations"); + + // We specify the columns to be projected by adding them to the selected fields, + ReadSession.TableReadOptions options = + ReadSession.TableReadOptions.newBuilder().addSelectedFields("last_reported").build(); + + // Start specifying the read session we want created. + ReadSession.Builder sessionBuilder = + ReadSession.newBuilder() + .setTable(srcTable) + // This API can also deliver data serialized in Apache Avro format. + // This example leverages Apache Avro. + .setDataFormat(DataFormat.AVRO) + .setReadOptions(options); + + // Optionally specify the snapshot time. When unspecified, snapshot time is "now". + if (snapshotMillis != null) { + Timestamp t = + Timestamp.newBuilder() + .setSeconds(snapshotMillis / 1000) + .setNanos((int) ((snapshotMillis % 1000) * 1000000)) + .build(); + TableModifiers modifiers = TableModifiers.newBuilder().setSnapshotTime(t).build(); + sessionBuilder.setTableModifiers(modifiers); + } + + // Begin building the session creation request. 
+ CreateReadSessionRequest.Builder builder = + CreateReadSessionRequest.newBuilder() + .setParent(parent) + .setReadSession(sessionBuilder) + .setMaxStreamCount(1); + + // Request the session creation. + ReadSession session = client.createReadSession(builder.build()); + + SimpleRowReader reader = + new SimpleRowReader(new Schema.Parser().parse(session.getAvroSchema().getSchema())); + + // Assert that there are streams available in the session. An empty table may not have + // data available. If no sessions are available for an anonymous (cached) table, consider + // writing results of a query to a named table rather than consuming cached results directly. + Preconditions.checkState(session.getStreamsCount() > 0); + + // Use the first stream to perform reading. + String streamName = session.getStreams(0).getName(); + + ReadRowsRequest readRowsRequest = + ReadRowsRequest.newBuilder().setReadStream(streamName).build(); + + // Process each block of rows as they arrive and decode using our simple row reader. + ServerStream stream = client.readRowsCallable().call(readRowsRequest); + for (ReadRowsResponse response : stream) { + Preconditions.checkState(response.hasAvroRows()); + reader.processRows(response.getAvroRows()); + } + } + } +} +// [END bigquerystorage_read_timestamp_avro] diff --git a/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStream.java b/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStream.java index 483238a816..fed1493587 100644 --- a/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStream.java +++ b/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStream.java @@ -106,11 +106,11 @@ public static void writeToDefaultStream(String projectId, String datasetName, St // Final cleanup for the stream during worker teardown. 
writer.cleanup(); - verifyExpectedRowCount(parentTable, 12); + verifyExpectedRowCount(parentTable, 12L); System.out.println("Appended records successfully."); } - private static void verifyExpectedRowCount(TableName parentTable, int expectedRowCount) + private static void verifyExpectedRowCount(TableName parentTable, long expectedRowCount) throws InterruptedException { String queryRowCount = "SELECT COUNT(*) FROM `" @@ -123,8 +123,8 @@ private static void verifyExpectedRowCount(TableName parentTable, int expectedRo QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(queryRowCount).build(); BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService(); TableResult results = bigquery.query(queryConfig); - int countRowsActual = - Integer.parseInt(results.getValues().iterator().next().get("f0_").getStringValue()); + long countRowsActual = + Long.parseLong(results.getValues().iterator().next().get("f0_").getStringValue()); if (countRowsActual != expectedRowCount) { throw new RuntimeException( "Unexpected row count. Expected: " + expectedRowCount + ". 
Actual: " + countRowsActual); @@ -176,7 +176,7 @@ private JsonStreamWriter createStreamWriter(String tableName) // For more information about JsonStreamWriter, see: // https://googleapis.dev/java/google-cloud-bigquerystorage/latest/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.html return JsonStreamWriter.newBuilder(tableName, client) - .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(100))) + .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(10))) .setChannelProvider( BigQueryWriteSettings.defaultGrpcTransportProviderBuilder() .setKeepAliveTime(org.threeten.bp.Duration.ofMinutes(1)) diff --git a/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJson.java b/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJson.java new file mode 100644 index 0000000000..9bcb32d764 --- /dev/null +++ b/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJson.java @@ -0,0 +1,312 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_timestamp_jsonstreamwriter_default] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.batching.FlowControlSettings; +import com.google.api.gax.core.FixedExecutorProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.QueryJobConfiguration; +import com.google.cloud.bigquery.TableResult; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings; +import com.google.cloud.bigquery.storage.v1.Exceptions; +import com.google.cloud.bigquery.storage.v1.JsonStreamWriter; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Descriptors.DescriptorValidationException; +import java.io.IOException; +import java.time.Instant; +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.Phaser; +import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.concurrent.GuardedBy; +import org.json.JSONArray; +import org.json.JSONObject; +import org.threeten.bp.Duration; + +public class WriteToDefaultStreamTimestampJson { + + public static void runWriteToDefaultStream() + throws Descriptors.DescriptorValidationException, InterruptedException, IOException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "MY_PROJECT_ID"; + String datasetName = "MY_DATASET_NAME"; + String tableName = "MY_TABLE_NAME"; + writeToDefaultStream(projectId, datasetName, tableName); + } + + // Create a JSON object that is compatible with the table schema. + private static JSONObject buildRecord() { + JSONObject record = new JSONObject(); + record.put("timestampField", Instant.now().toString()); + return record; + } + + public static void writeToDefaultStream(String projectId, String datasetName, String tableName) + throws Descriptors.DescriptorValidationException, InterruptedException, IOException { + TableName parentTable = TableName.of(projectId, datasetName, tableName); + + DataWriter writer = new DataWriter(); + // One time initialization for the worker. + writer.initialize(parentTable); + + // Write two batches of fake data to the stream, each with 10 JSON records. Data may be + // batched up to the maximum request size: + // https://cloud.google.com/bigquery/quotas#write-api-limits + for (int i = 0; i < 2; i++) { + JSONArray jsonArr = new JSONArray(); + for (int j = 0; j < 10; j++) { + jsonArr.put(buildRecord()); + } + + writer.append(new AppendContext(jsonArr)); + } + + // Final cleanup for the stream during worker teardown. + writer.cleanup(); + verifyExpectedRowCount(parentTable, 20L); + System.out.println("Appended records successfully."); + } + + private static void verifyExpectedRowCount(TableName parentTable, long expectedRowCount) + throws InterruptedException { + String queryRowCount = + "SELECT COUNT(*) FROM `" + + parentTable.getProject() + + "." + + parentTable.getDataset() + + "." 
+ + parentTable.getTable() + + "`"; + QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(queryRowCount).build(); + BigQuery bigquery = BigQueryOptions.getDefaultInstance().getService(); + TableResult results = bigquery.query(queryConfig); + long countRowsActual = + Long.parseLong(results.getValues().iterator().next().get("f0_").getStringValue()); + if (countRowsActual != expectedRowCount) { + throw new RuntimeException( + "Unexpected row count. Expected: " + expectedRowCount + ". Actual: " + countRowsActual); + } + } + + private static class AppendContext { + JSONArray data; + + AppendContext(JSONArray data) { + this.data = data; + } + } + + private static class DataWriter { + + private static final int MAX_RECREATE_COUNT = 3; + + private BigQueryWriteClient client; + + // Track the number of in-flight requests to wait for all responses before shutting down. + private final Phaser inflightRequestCount = new Phaser(1); + private final Object lock = new Object(); + private JsonStreamWriter streamWriter; + + @GuardedBy("lock") + private RuntimeException error = null; + + private final AtomicInteger recreateCount = new AtomicInteger(0); + + private JsonStreamWriter createStreamWriter(String tableName) + throws Descriptors.DescriptorValidationException, IOException, InterruptedException { + // Configure in-stream automatic retry settings. + // Error codes that are immediately retried: + // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED + // Error codes that are retried with exponential backoff: + // * RESOURCE_EXHAUSTED + RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.1) + .setMaxAttempts(5) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .build(); + + // Use the JSON stream writer to send records in JSON format. Specify the table name to write + // to the default stream. 
+ // For more information about JsonStreamWriter, see: + // https://googleapis.dev/java/google-cloud-bigquerystorage/latest/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.html + return JsonStreamWriter.newBuilder(tableName, client) + .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(10))) + .setChannelProvider( + BigQueryWriteSettings.defaultGrpcTransportProviderBuilder() + .setKeepAliveTime(org.threeten.bp.Duration.ofMinutes(1)) + .setKeepAliveTimeout(org.threeten.bp.Duration.ofMinutes(1)) + .setKeepAliveWithoutCalls(true) + .build()) + .setEnableConnectionPool(true) + // This will allow connection pool to scale up better. + .setFlowControlSettings( + FlowControlSettings.newBuilder().setMaxOutstandingElementCount(100L).build()) + // If value is missing in json and there is a default value configured on bigquery + // column, apply the default value to the missing value field. + .setDefaultMissingValueInterpretation( + AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE) + .setRetrySettings(retrySettings) + .build(); + } + + public void initialize(TableName parentTable) + throws Descriptors.DescriptorValidationException, IOException, InterruptedException { + // Initialize client without settings, internally within stream writer a new client will be + // created with full settings. + client = BigQueryWriteClient.create(); + + streamWriter = createStreamWriter(parentTable.toString()); + } + + public void append(AppendContext appendContext) + throws Descriptors.DescriptorValidationException, IOException, InterruptedException { + synchronized (this.lock) { + if (!streamWriter.isUserClosed() + && streamWriter.isClosed() + && recreateCount.getAndIncrement() < MAX_RECREATE_COUNT) { + streamWriter = createStreamWriter(streamWriter.getStreamName()); + this.error = null; + } + // If earlier appends have failed, we need to reset before continuing. 
+ if (this.error != null) { + throw this.error; + } + } + // Append asynchronously for increased throughput. + ApiFuture future = streamWriter.append(appendContext.data); + ApiFutures.addCallback( + future, new AppendCompleteCallback(this, appendContext), MoreExecutors.directExecutor()); + + // Increase the count of in-flight requests. + inflightRequestCount.register(); + } + + public void cleanup() { + // Wait for all in-flight requests to complete. + inflightRequestCount.arriveAndAwaitAdvance(); + + client.close(); + // Close the connection to the server. + streamWriter.close(); + + // Verify that no error occurred in the stream. + synchronized (this.lock) { + if (this.error != null) { + throw this.error; + } + } + } + + static class AppendCompleteCallback implements ApiFutureCallback { + + private final DataWriter parent; + private final AppendContext appendContext; + + public AppendCompleteCallback(DataWriter parent, AppendContext appendContext) { + this.parent = parent; + this.appendContext = appendContext; + } + + public void onSuccess(AppendRowsResponse response) { + System.out.println("Append success"); + this.parent.recreateCount.set(0); + done(); + } + + public void onFailure(Throwable throwable) { + if (throwable instanceof Exceptions.AppendSerializationError) { + Exceptions.AppendSerializationError ase = (Exceptions.AppendSerializationError) throwable; + Map rowIndexToErrorMessage = ase.getRowIndexToErrorMessage(); + if (!rowIndexToErrorMessage.isEmpty()) { + // Omit the faulty rows + JSONArray dataNew = new JSONArray(); + for (int i = 0; i < appendContext.data.length(); i++) { + if (!rowIndexToErrorMessage.containsKey(i)) { + dataNew.put(appendContext.data.get(i)); + } else { + // process faulty rows by placing them on a dead-letter-queue, for instance + } + } + + // Retry the remaining valid rows, but using a separate thread to + // avoid potentially blocking while we are in a callback. 
+ if (!dataNew.isEmpty()) { + try { + this.parent.append(new AppendContext(dataNew)); + } catch (DescriptorValidationException | IOException | InterruptedException e) { + throw new RuntimeException(e); + } + } + // Mark the existing attempt as done since we got a response for it + done(); + return; + } + } + + boolean resendRequest = false; + if (throwable instanceof Exceptions.MaximumRequestCallbackWaitTimeExceededException) { + resendRequest = true; + } else if (throwable instanceof Exceptions.StreamWriterClosedException) { + if (!parent.streamWriter.isUserClosed()) { + resendRequest = true; + } + } + if (resendRequest) { + // Retry this request. + try { + this.parent.append(new AppendContext(appendContext.data)); + } catch (Descriptors.DescriptorValidationException + | IOException + | InterruptedException e) { + throw new RuntimeException(e); + } + // Mark the existing attempt as done since we got a response for it + done(); + return; + } + + synchronized (this.parent.lock) { + if (this.parent.error == null) { + Exceptions.StorageException storageException = Exceptions.toStorageException(throwable); + this.parent.error = + (storageException != null) ? storageException : new RuntimeException(throwable); + } + } + done(); + } + + private void done() { + // Reduce the count of in-flight requests. 
+ this.parent.inflightRequestCount.arriveAndDeregister(); + } + } + } +} +// [END bigquerystorage_timestamp_jsonstreamwriter_default] diff --git a/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrow.java b/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrow.java new file mode 100644 index 0000000000..6797aea936 --- /dev/null +++ b/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrow.java @@ -0,0 +1,367 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +// [START bigquerystorage_timestamp_streamwriter_default_arrow] +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.FixedExecutorProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.QueryJobConfiguration; +import com.google.cloud.bigquery.TableResult; +import com.google.cloud.bigquery.storage.v1.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings; +import com.google.cloud.bigquery.storage.v1.Exceptions; +import com.google.cloud.bigquery.storage.v1.StreamWriter; +import com.google.cloud.bigquery.storage.v1.TableName; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.MoreExecutors; +import java.io.IOException; +import java.time.Instant; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.Phaser; +import java.util.concurrent.atomic.AtomicInteger; +import javax.annotation.concurrent.GuardedBy; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.TimeStampNanoTZVector; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.arrow.vector.VectorUnloader; +import org.apache.arrow.vector.compression.CompressionCodec; +import org.apache.arrow.vector.compression.CompressionUtil; +import org.apache.arrow.vector.compression.NoCompressionCodec; +import org.apache.arrow.vector.ipc.message.ArrowRecordBatch; +import org.apache.arrow.vector.types.TimeUnit; +import org.apache.arrow.vector.types.pojo.ArrowType; +import 
org.apache.arrow.vector.types.pojo.Field; +import org.apache.arrow.vector.types.pojo.FieldType; +import org.apache.arrow.vector.types.pojo.Schema; +import org.threeten.bp.Duration; + +/** + * This class demonstrates how to ingest data using Arrow format into BigQuery via the default + * stream. It initiates a DataWriter to establish a connection to BigQuery and reuses this + * connection to continuously ingest data. + * + *

Depending on the JDK version, you may need to include this into your VM options: {@code
+ * --add-opens=java.base/java.nio=org.apache.arrow.memory.core,ALL-UNNAMED}. See the documentation for
+ * more information.
+ */
+public class WriteToDefaultStreamTimestampWithArrow {
+
+  public static final long NANOS = 1000000000L;
+
+  public static void main(String[] args) throws InterruptedException, IOException {
+    if (args.length < 3) {
+      System.out.println(
+          "Usage: WriteToDefaultStreamTimestampWithArrow <projectId> <datasetName> <tableName>");
+      return;
+    }
+    String projectId = args[0];
+    String datasetName = args[1];
+    String tableName = args[2];
+    // For this sample, the table schema should contain 1 field:
+    // ['timestampField': TIMESTAMP]
+    writeToDefaultStreamWithArrow(projectId, datasetName, tableName);
+  }
+
+  private static Schema createArrowSchema() {
+    List<Field> fields =
+        ImmutableList.of(
+            new Field(
+                "timestampField",
+                FieldType.nullable(new ArrowType.Timestamp(TimeUnit.NANOSECOND, "UTC")),
+                null));
+    return new Schema(fields, null);
+  }
+
+  // Create an ArrowRecordBatch object that is compatible with the table schema.
+ private static ArrowRecordBatch buildRecordBatch(VectorSchemaRoot root, int rowCount) { + TimeStampNanoTZVector timestampField = (TimeStampNanoTZVector) root.getVector("timestampField"); + timestampField.allocateNew(rowCount); + + Instant now = Instant.now(); + for (int i = 0; i < rowCount; i++) { + timestampField.set(i, now.getEpochSecond() * NANOS + now.getNano()); + } + root.setRowCount(rowCount); + + CompressionCodec codec = + NoCompressionCodec.Factory.INSTANCE.createCodec(CompressionUtil.CodecType.NO_COMPRESSION); + VectorUnloader vectorUnloader = + new VectorUnloader(root, /* includeNullCount= */ true, codec, /* alignBuffers= */ true); + return vectorUnloader.getRecordBatch(); + } + + public static void writeToDefaultStreamWithArrow( + String projectId, String datasetName, String tableName) + throws InterruptedException, IOException { + TableName parentTable = TableName.of(projectId, datasetName, tableName); + Schema arrowSchema = createArrowSchema(); + DataWriter writer = new DataWriter(); + // One time initialization for the worker. + writer.initialize(parentTable, arrowSchema); + long initialRowCount = getRowCount(parentTable); + try (BufferAllocator allocator = new RootAllocator()) { + // A writer should be used to ingest as much data as possible before teardown. + // Append 100 batches. + for (int i = 0; i < 100; i++) { + try (VectorSchemaRoot root = VectorSchemaRoot.create(arrowSchema, allocator)) { + // Each batch has 10 rows. + ArrowRecordBatch batch = buildRecordBatch(root, 10); + + // Asynchronous append. + writer.append(new ArrowData(arrowSchema, batch)); + } + } + } + // Final cleanup for the stream during worker teardown. + // It's blocked until all append requests' response are received. 
+ writer.cleanup(); + + verifyExpectedRowCount(parentTable, initialRowCount + 1000); + System.out.println("Appended records successfully."); + } + + private static long getRowCount(TableName parentTable) throws InterruptedException { + String queryRowCount = + "SELECT COUNT(*) FROM `" + + parentTable.getProject() + + "." + + parentTable.getDataset() + + "." + + parentTable.getTable() + + "`"; + QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(queryRowCount).build(); + BigQuery bigquery = + BigQueryOptions.newBuilder().setProjectId(parentTable.getProject()).build().getService(); + TableResult results = bigquery.query(queryConfig); + return Long.parseLong(results.getValues().iterator().next().get("f0_").getStringValue()); + } + + private static void verifyExpectedRowCount(TableName parentTable, long expectedRowCount) + throws InterruptedException { + String queryRowCount = + "SELECT COUNT(*) FROM `" + + parentTable.getProject() + + "." + + parentTable.getDataset() + + "." + + parentTable.getTable() + + "`"; + QueryJobConfiguration queryConfig = QueryJobConfiguration.newBuilder(queryRowCount).build(); + BigQuery bigquery = + BigQueryOptions.newBuilder().setProjectId(parentTable.getProject()).build().getService(); + TableResult results = bigquery.query(queryConfig); + int countRowsActual = + Integer.parseInt(results.getValues().iterator().next().get("f0_").getStringValue()); + if (countRowsActual != expectedRowCount) { + throw new RuntimeException( + "Unexpected row count. Expected: " + expectedRowCount + ". 
Actual: " + countRowsActual); + } + } + + private static class ArrowData { + Schema arrowSchema; + ArrowRecordBatch data; + + ArrowData(Schema arrowSchema, ArrowRecordBatch data) { + this.arrowSchema = arrowSchema; + this.data = data; + } + } + + private static class DataWriter { + + private static final int MAX_RECREATE_COUNT = 3; + + private BigQueryWriteClient client; + + // Track the number of in-flight requests to wait for all responses before shutting down. + private final Phaser inflightRequestCount = new Phaser(1); + private final Object lock = new Object(); + + private Schema arrowSchema; + private StreamWriter streamWriter; + + @GuardedBy("lock") + private RuntimeException error = null; + + private final AtomicInteger recreateCount = new AtomicInteger(0); + + private StreamWriter createStreamWriter(String streamName, Schema arrowSchema) + throws IOException { + // Configure in-stream automatic retry settings. + // Error codes that are immediately retried: + // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED + // Error codes that are retried with exponential backoff: + // * RESOURCE_EXHAUSTED + RetrySettings retrySettings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(500)) + .setRetryDelayMultiplier(1.1) + .setMaxAttempts(5) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .build(); + + // Use the Stream writer to send records in Arrow format. Specify the table name to write + // to the default stream. 
+ // For more information about StreamWriter, see: + // https://cloud.google.com/java/docs/reference/google-cloud-bigquerystorage/latest/com.google.cloud.bigquery.storage.v1.StreamWriter + return StreamWriter.newBuilder(streamName, client) + .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(10))) + .setChannelProvider( + BigQueryWriteSettings.defaultGrpcTransportProviderBuilder() + .setKeepAliveTime(org.threeten.bp.Duration.ofMinutes(1)) + .setKeepAliveTimeout(org.threeten.bp.Duration.ofMinutes(1)) + .setKeepAliveWithoutCalls(true) + .build()) + .setEnableConnectionPool(true) + // If value is missing in ArrowRecordBatch and there is a default value configured on + // bigquery column, apply the default value to the missing value field. + .setDefaultMissingValueInterpretation( + AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE) + .setMaxRetryDuration(java.time.Duration.ofSeconds(5)) + // Set the StreamWriter with Arrow Schema, this would only allow the StreamWriter to + // append data in Arrow format. + .setWriterSchema(arrowSchema) + .setRetrySettings(retrySettings) + .build(); + } + + public void initialize(TableName parentTable, Schema arrowSchema) throws IOException { + // Initialize client without settings, internally within stream writer a new client will be + // created with full settings. + client = BigQueryWriteClient.create(); + + streamWriter = createStreamWriter(parentTable.toString() + "/_default", arrowSchema); + } + + public void append(ArrowData arrowData) throws IOException { + synchronized (this.lock) { + if (!streamWriter.isUserClosed() + && streamWriter.isClosed() + && recreateCount.getAndIncrement() < MAX_RECREATE_COUNT) { + streamWriter = createStreamWriter(streamWriter.getStreamName(), arrowData.arrowSchema); + this.error = null; + } + // If earlier appends have failed, we need to reset before continuing. 
+ if (this.error != null) { + throw this.error; + } + } + // Append asynchronously for increased throughput. + ApiFuture future = streamWriter.append(arrowData.data); + ApiFutures.addCallback( + future, new AppendCompleteCallback(this, arrowData), MoreExecutors.directExecutor()); + + // Increase the count of in-flight requests. + inflightRequestCount.register(); + } + + public void cleanup() { + // Wait for all in-flight requests to complete. + inflightRequestCount.arriveAndAwaitAdvance(); + + client.close(); + // Close the connection to the server. + streamWriter.close(); + + // Verify that no error occurred in the stream. + synchronized (this.lock) { + if (this.error != null) { + throw this.error; + } + } + } + + static class AppendCompleteCallback implements ApiFutureCallback { + + private final DataWriter parent; + private final ArrowData arrowData; + + public AppendCompleteCallback(DataWriter parent, ArrowData arrowData) { + this.parent = parent; + this.arrowData = arrowData; + } + + public void onSuccess(AppendRowsResponse response) { + System.out.println("Append success"); + this.parent.recreateCount.set(0); + done(); + } + + public void onFailure(Throwable throwable) { + System.out.println("Append failed: " + throwable.toString()); + if (throwable instanceof Exceptions.AppendSerializationError) { + Exceptions.AppendSerializationError ase = (Exceptions.AppendSerializationError) throwable; + Map rowIndexToErrorMessage = ase.getRowIndexToErrorMessage(); + if (!rowIndexToErrorMessage.isEmpty()) { + System.out.println("row level errors: " + rowIndexToErrorMessage); + // The append returned failure with indices for faulty rows. + // Fix the faulty rows or remove them from the appended data and retry the append. 
+ done(); + return; + } + } + + boolean resendRequest = false; + if (throwable instanceof Exceptions.MaximumRequestCallbackWaitTimeExceededException) { + resendRequest = true; + } else if (throwable instanceof Exceptions.StreamWriterClosedException) { + if (!parent.streamWriter.isUserClosed()) { + resendRequest = true; + } + } + if (resendRequest) { + // Retry this request. + try { + this.parent.append(new ArrowData(arrowData.arrowSchema, arrowData.data)); + } catch (IOException e) { + throw new RuntimeException(e); + } + // Mark the existing attempt as done since we got a response for it + done(); + return; + } + + synchronized (this.parent.lock) { + if (this.parent.error == null) { + Exceptions.StorageException storageException = Exceptions.toStorageException(throwable); + this.parent.error = + (storageException != null) ? storageException : new RuntimeException(throwable); + } + } + done(); + } + + private void done() { + // Reduce the count of in-flight requests. + this.parent.inflightRequestCount.arriveAndDeregister(); + } + } + } +} +// [END bigquerystorage_timestamp_streamwriter_default_arrow] diff --git a/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamWithArrow.java b/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamWithArrow.java index d0bc455a9a..5db06efb04 100644 --- a/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamWithArrow.java +++ b/samples/snippets/src/main/java/com/example/bigquerystorage/WriteToDefaultStreamWithArrow.java @@ -129,17 +129,17 @@ public static void writeToDefaultStreamWithArrow( // One time initialization for the worker. writer.initialize(parentTable, arrowSchema); long initialRowCount = getRowCount(parentTable); - BufferAllocator allocator = new RootAllocator(); - - // A writer should be used to ingest as much data as possible before teardown. - // Append 100 batches. 
- for (int i = 0; i < 100; i++) { - try (VectorSchemaRoot root = VectorSchemaRoot.create(arrowSchema, allocator)) { - // Each batch has 10 rows. - ArrowRecordBatch batch = buildRecordBatch(root, 10); - - // Asynchronous append. - writer.append(new ArrowData(arrowSchema, batch)); + try (BufferAllocator allocator = new RootAllocator()) { + // A writer should be used to ingest as much data as possible before teardown. + // Append 100 batches. + for (int i = 0; i < 100; i++) { + try (VectorSchemaRoot root = VectorSchemaRoot.create(arrowSchema, allocator)) { + // Each batch has 10 rows. + ArrowRecordBatch batch = buildRecordBatch(root, 10); + + // Asynchronous append. + writer.append(new ArrowData(arrowSchema, batch)); + } } } // Final cleanup for the stream during worker teardown. @@ -180,8 +180,8 @@ private static void verifyExpectedRowCount(TableName parentTable, long expectedR BigQuery bigquery = BigQueryOptions.newBuilder().setProjectId(parentTable.getProject()).build().getService(); TableResult results = bigquery.query(queryConfig); - int countRowsActual = - Integer.parseInt(results.getValues().iterator().next().get("f0_").getStringValue()); + long countRowsActual = + Long.parseLong(results.getValues().iterator().next().get("f0_").getStringValue()); if (countRowsActual != expectedRowCount) { throw new RuntimeException( "Unexpected row count. Expected: " + expectedRowCount + ". Actual: " + countRowsActual); @@ -217,7 +217,7 @@ private static class DataWriter { private final AtomicInteger recreateCount = new AtomicInteger(0); private StreamWriter createStreamWriter(String streamName, Schema arrowSchema) - throws DescriptorValidationException, IOException, InterruptedException { + throws IOException { // Configure in-stream automatic retry settings. 
// Error codes that are immediately retried: // * ABORTED, UNAVAILABLE, CANCELLED, INTERNAL, DEADLINE_EXCEEDED @@ -236,7 +236,7 @@ private StreamWriter createStreamWriter(String streamName, Schema arrowSchema) // For more information about StreamWriter, see: // https://cloud.google.com/java/docs/reference/google-cloud-bigquerystorage/latest/com.google.cloud.bigquery.storage.v1.StreamWriter return StreamWriter.newBuilder(streamName, client) - .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(100))) + .setExecutorProvider(FixedExecutorProvider.create(Executors.newScheduledThreadPool(10))) .setChannelProvider( BigQueryWriteSettings.defaultGrpcTransportProviderBuilder() .setKeepAliveTime(org.threeten.bp.Duration.ofMinutes(1)) diff --git a/samples/snippets/src/test/java/com/example/bigquerystorage/ReadTimestampArrowIT.java b/samples/snippets/src/test/java/com/example/bigquerystorage/ReadTimestampArrowIT.java new file mode 100644 index 0000000000..f8f428dd90 --- /dev/null +++ b/samples/snippets/src/test/java/com/example/bigquerystorage/ReadTimestampArrowIT.java @@ -0,0 +1,55 @@ +/* + * Copyright 2026 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadTimestampArrowIT { + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private PrintStream out; + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + out = new PrintStream(bout); + System.setOut(out); + } + + @After + public void tearDown() { + System.setOut(null); + } + + @Test + public void testQuickstart() throws Exception { + ReadTimestampArrow.main(PROJECT_ID); + String got = bout.toString(); + // Ensure that `last_reported` column is in the output + assertThat(got).contains("last_reported"); + } +} diff --git a/samples/snippets/src/test/java/com/example/bigquerystorage/ReadTimestampAvroIT.java b/samples/snippets/src/test/java/com/example/bigquerystorage/ReadTimestampAvroIT.java new file mode 100644 index 0000000000..ae7420fab6 --- /dev/null +++ b/samples/snippets/src/test/java/com/example/bigquerystorage/ReadTimestampAvroIT.java @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadTimestampAvroIT { + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + } + + @After + public void tearDown() { + System.setOut(null); + } + + @Test + public void testReadTimestampAvro() throws Exception { + ReadTimestampAvro.main(PROJECT_ID); + String got = bout.toString(); + // Ensure that `last_reported` column is in the output + assertThat(got).contains("last_reported"); + } +} diff --git a/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJsonIT.java b/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJsonIT.java new file mode 100644 index 0000000000..0e5e8bcf80 --- /dev/null +++ b/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampJsonIT.java @@ -0,0 +1,96 @@ +/* + * Copyright 2026 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class WriteToDefaultStreamTimestampJsonIT { + private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private BigQuery bigquery; + private String datasetName; + private String tableName; + + private static void requireEnvVar() { + assertNotNull( + "Environment variable " + "GOOGLE_CLOUD_PROJECT" + " is required to perform these tests.", + System.getenv("GOOGLE_CLOUD_PROJECT")); + } + + @BeforeClass + public static void checkRequirements() { + requireEnvVar(); + } + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + + bigquery = BigQueryOptions.getDefaultInstance().getService(); + + // Create a new dataset and table for each test. 
+ datasetName = "WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + tableName = "DEFAULT_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8); + Schema schema = + Schema.of(Field.newBuilder("timestampField", StandardSQLTypeName.TIMESTAMP).build()); + bigquery.create(DatasetInfo.newBuilder(datasetName).build()); + TableInfo tableInfo = + TableInfo.newBuilder(TableId.of(datasetName, tableName), StandardTableDefinition.of(schema)) + .build(); + bigquery.create(tableInfo); + } + + @After + public void tearDown() { + bigquery.delete( + DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName), + BigQuery.DatasetDeleteOption.deleteContents()); + System.setOut(null); + } + + @Test + public void testWriteToDefaultStream() throws Exception { + WriteToDefaultStreamTimestampJson.writeToDefaultStream( + GOOGLE_CLOUD_PROJECT, datasetName, tableName); + assertThat(bout.toString()).contains("Appended records successfully."); + } +} diff --git a/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrowIT.java b/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrowIT.java new file mode 100644 index 0000000000..2561de9859 --- /dev/null +++ b/samples/snippets/src/test/java/com/example/bigquerystorage/WriteToDefaultStreamTimestampWithArrowIT.java @@ -0,0 +1,96 @@ +/* + * Copyright 2026 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.bigquerystorage; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.bigquery.BigQuery; +import com.google.cloud.bigquery.BigQueryOptions; +import com.google.cloud.bigquery.DatasetId; +import com.google.cloud.bigquery.DatasetInfo; +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.Schema; +import com.google.cloud.bigquery.StandardSQLTypeName; +import com.google.cloud.bigquery.StandardTableDefinition; +import com.google.cloud.bigquery.TableId; +import com.google.cloud.bigquery.TableInfo; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.UUID; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class WriteToDefaultStreamTimestampWithArrowIT { + private static final String GOOGLE_CLOUD_PROJECT = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private BigQuery bigquery; + private String datasetName; + private String tableName; + + private static void requireEnvVar() { + assertNotNull( + "Environment variable GOOGLE_CLOUD_PROJECT is required to perform these tests.", + System.getenv("GOOGLE_CLOUD_PROJECT")); + } + + @BeforeClass + public static void checkRequirements() { + requireEnvVar(); + } + + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + + bigquery = BigQueryOptions.getDefaultInstance().getService(); + + // Create a new dataset and table for each test. 
+    datasetName = "WRITE_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8);
+    tableName = "DEFAULT_STREAM_TEST" + UUID.randomUUID().toString().substring(0, 8);
+    Schema schema =
+        Schema.of(Field.newBuilder("timestampField", StandardSQLTypeName.TIMESTAMP).build());
+    bigquery.create(DatasetInfo.newBuilder(datasetName).build());
+    TableInfo tableInfo =
+        TableInfo.newBuilder(TableId.of(datasetName, tableName), StandardTableDefinition.of(schema))
+            .build();
+    bigquery.create(tableInfo);
+  }
+
+  @After
+  public void tearDown() {
+    bigquery.delete(
+        DatasetId.of(GOOGLE_CLOUD_PROJECT, datasetName),
+        BigQuery.DatasetDeleteOption.deleteContents());
+    System.setOut(null);
+  }
+
+  @Test
+  public void testWriteToDefaultStream() throws Exception {
+    WriteToDefaultStreamTimestampWithArrow.writeToDefaultStreamWithArrow(
+        GOOGLE_CLOUD_PROJECT, datasetName, tableName);
+    assertThat(bout.toString()).contains("Appended records successfully.");
+  }
+}
diff --git a/versions.txt b/versions.txt
index 3d184de1c8..7797b885b3 100644
--- a/versions.txt
+++ b/versions.txt
@@ -1,14 +1,14 @@
 # Format:
 # module:released-version:current-version
-google-cloud-bigquerystorage:3.18.0:3.18.0
-grpc-google-cloud-bigquerystorage-v1beta1:0.190.0:0.190.0
-grpc-google-cloud-bigquerystorage-v1beta2:0.190.0:0.190.0
-grpc-google-cloud-bigquerystorage-v1:3.18.0:3.18.0
-proto-google-cloud-bigquerystorage-v1beta1:0.190.0:0.190.0
-proto-google-cloud-bigquerystorage-v1beta2:0.190.0:0.190.0
-proto-google-cloud-bigquerystorage-v1:3.18.0:3.18.0
-grpc-google-cloud-bigquerystorage-v1alpha:3.18.0:3.18.0
-proto-google-cloud-bigquerystorage-v1alpha:3.18.0:3.18.0
-proto-google-cloud-bigquerystorage-v1beta:3.18.0:3.18.0
-grpc-google-cloud-bigquerystorage-v1beta:3.18.0:3.18.0
+google-cloud-bigquerystorage:3.19.1:3.19.1
+grpc-google-cloud-bigquerystorage-v1beta1:0.191.1:0.191.1
+grpc-google-cloud-bigquerystorage-v1beta2:0.191.1:0.191.1
+grpc-google-cloud-bigquerystorage-v1:3.19.1:3.19.1
+proto-google-cloud-bigquerystorage-v1beta1:0.191.1:0.191.1 +proto-google-cloud-bigquerystorage-v1beta2:0.191.1:0.191.1 +proto-google-cloud-bigquerystorage-v1:3.19.1:3.19.1 +grpc-google-cloud-bigquerystorage-v1alpha:3.19.1:3.19.1 +proto-google-cloud-bigquerystorage-v1alpha:3.19.1:3.19.1 +proto-google-cloud-bigquerystorage-v1beta:3.19.1:3.19.1 +grpc-google-cloud-bigquerystorage-v1beta:3.19.1:3.19.1